Diffstat (limited to 'drivers')
1109 files changed, 82624 insertions, 22591 deletions
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 495fd0a1f040..b508df2ecada 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -21,7 +21,6 @@ #include <linux/module.h> #include <linux/atmdev.h> #include <linux/sonet.h> -#include <linux/atm_suni.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/firmware.h> diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 0c13cac903de..9e4bd751db79 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -1784,12 +1784,6 @@ set_tct(struct idt77252_dev *card, struct vc_map *vc) /*****************************************************************************/ static __inline__ int -idt77252_fbq_level(struct idt77252_dev *card, int queue) -{ - return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) & 0x0f; -} - -static __inline__ int idt77252_fbq_full(struct idt77252_dev *card, int queue) { return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) == 0x0f; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index eef637fd90b3..933e3ff2ee8d 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -680,7 +680,7 @@ static void ia_tx_poll (IADEV *iadev) { skb1 = skb_dequeue(&iavcc->txing_skb); } if (!skb1) { - IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);) + IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);) ia_enque_head_rtn_q (&iadev->tx_return_q, rtne); break; } diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c index c920a8c52925..21e5acc766b8 100644 --- a/drivers/atm/suni.c +++ b/drivers/atm/suni.c @@ -21,7 +21,6 @@ #include <linux/timer.h> #include <linux/init.h> #include <linux/capability.h> -#include <linux/atm_suni.h> #include <linux/slab.h> #include <asm/param.h> #include <linux/uaccess.h> diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 87760aa60446..12aca34e8db0 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -52,13 +52,6 @@ static inline u32 mips_read32(struct bcma_drv_mips *mcore, return bcma_read32(mcore->core, offset); } -static inline void mips_write32(struct bcma_drv_mips *mcore, - u16 offset, - u32 value) -{ - bcma_write32(mcore->core, offset, value); -} - static u32 bcma_core_mips_irqflag(struct bcma_device *dev) { u32 flag; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 4e73a531b377..851842372c9b 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -425,4 +425,14 @@ config BT_HCIRSI Say Y here to compile support for HCI over Redpine into the kernel or say M to compile as a module. +config BT_VIRTIO + tristate "Virtio Bluetooth driver" + depends on VIRTIO + help + Virtio Bluetooth support driver. + This driver supports Virtio Bluetooth devices. + + Say Y here to compile support for HCI over Virtio into the + kernel or say M to compile as a module. 
+ endmenu diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 1a58a3ae142c..16286ea2655d 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -26,6 +26,8 @@ obj-$(CONFIG_BT_BCM) += btbcm.o obj-$(CONFIG_BT_RTL) += btrtl.o obj-$(CONFIG_BT_QCA) += btqca.o +obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o + obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o obj-$(CONFIG_BT_HCIRSI) += btrsi.o diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 88ce5f0ffc4b..e44b6993cf91 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -24,6 +24,14 @@ #define ECDSA_OFFSET 644 #define ECDSA_HEADER_LEN 320 +#define CMD_WRITE_BOOT_PARAMS 0xfc0e +struct cmd_write_boot_params { + u32 boot_addr; + u8 fw_build_num; + u8 fw_build_ww; + u8 fw_build_yy; +} __packed; + int btintel_check_bdaddr(struct hci_dev *hdev) { struct hci_rp_read_bd_addr *bda; @@ -208,10 +216,39 @@ void btintel_hw_error(struct hci_dev *hdev, u8 code) } EXPORT_SYMBOL_GPL(btintel_hw_error); -void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) +int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) { const char *variant; + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (ver->hw_platform != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", + ver->hw_platform); + return -EINVAL; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. + */ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", + ver->hw_variant); + return -EINVAL; + } + switch (ver->fw_variant) { case 0x06: variant = "Bootloader"; @@ -220,13 +257,16 @@ void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) variant = "Firmware"; break; default: - return; + bt_dev_err(hdev, "Unsupported firmware variant(%02x)", ver->fw_variant); + return -EINVAL; } bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u", variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); + + return 0; } EXPORT_SYMBOL_GPL(btintel_version_info); @@ -364,13 +404,56 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver) } EXPORT_SYMBOL_GPL(btintel_read_version); -void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version) +int btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version) { const char *variant; + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (INTEL_HW_PLATFORM(version->cnvi_bt) != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", + INTEL_HW_PLATFORM(version->cnvi_bt)); + return -EINVAL; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. 
+ */ + switch (INTEL_HW_VARIANT(version->cnvi_bt)) { + case 0x17: /* TyP */ + case 0x18: /* Slr */ + case 0x19: /* Slr-F */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", + INTEL_HW_VARIANT(version->cnvi_bt)); + return -EINVAL; + } + switch (version->img_type) { case 0x01: variant = "Bootloader"; + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (version->limited_cce != 0x00) { + bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)", + version->limited_cce); + return -EINVAL; + } + + /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */ + if (version->sbe_type > 0x01) { + bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)", + version->sbe_type); + return -EINVAL; + } + bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id); bt_dev_info(hdev, "Secure boot is %s", version->secure_boot ? "enabled" : "disabled"); @@ -389,15 +472,14 @@ void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *ve break; default: bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type); - goto done; + return -EINVAL; } bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant, 2000 + (version->timestamp >> 8), version->timestamp & 0xff, version->build_type, version->build_num); -done: - return; + return 0; } EXPORT_SYMBOL_GPL(btintel_version_info_tlv); @@ -455,12 +537,23 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver version->img_type = tlv->val[0]; break; case INTEL_TLV_TIME_STAMP: + /* If image type is Operational firmware (0x03), then + * running FW Calendar Week and Year information can + * be extracted from Timestamp information + */ + version->min_fw_build_cw = tlv->val[0]; + version->min_fw_build_yy = tlv->val[1]; version->timestamp = get_unaligned_le16(tlv->val); break; case INTEL_TLV_BUILD_TYPE: version->build_type = tlv->val[0]; break; case INTEL_TLV_BUILD_NUM: + /* If image type is Operational firmware (0x03), then + * running FW build number can be extracted from the + * Build information + */ + version->min_fw_build_nn = tlv->val[0]; version->build_num = get_unaligned_le32(tlv->val); break; case INTEL_TLV_SECURE_BOOT: @@ -841,7 +934,7 @@ static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev, static int btintel_download_firmware_payload(struct hci_dev *hdev, const struct firmware *fw, - u32 *boot_param, size_t offset) + size_t offset) { int err; const u8 *fw_ptr; @@ -854,20 +947,6 @@ static int btintel_download_firmware_payload(struct hci_dev *hdev, while (fw_ptr - fw->data < fw->size) { struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len); - /* Each SKU has a different reset parameter to use in the - * HCI_Intel_Reset command and it is embedded in the firmware - * data. So, instead of using static value per SKU, check - * the firmware data and save it for later use. - */ - if (le16_to_cpu(cmd->opcode) == 0xfc0e) { - /* The boot parameter is the first 32-bit value - * and rest of 3 octets are reserved. 
- */ - *boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd)); - - bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param); - } - frag_len += sizeof(*cmd) + cmd->plen; /* The parameter length of the secure send command requires @@ -896,28 +975,131 @@ done: return err; } +static bool btintel_firmware_version(struct hci_dev *hdev, + u8 num, u8 ww, u8 yy, + const struct firmware *fw, + u32 *boot_addr) +{ + const u8 *fw_ptr; + + fw_ptr = fw->data; + + while (fw_ptr - fw->data < fw->size) { + struct hci_command_hdr *cmd = (void *)(fw_ptr); + + /* Each SKU has a different reset parameter to use in the + * HCI_Intel_Reset command and it is embedded in the firmware + * data. So, instead of using static value per SKU, check + * the firmware data and save it for later use. + */ + if (le16_to_cpu(cmd->opcode) == CMD_WRITE_BOOT_PARAMS) { + struct cmd_write_boot_params *params; + + params = (void *)(fw_ptr + sizeof(*cmd)); + + bt_dev_info(hdev, "Boot Address: 0x%x", + le32_to_cpu(params->boot_addr)); + + bt_dev_info(hdev, "Firmware Version: %u-%u.%u", + params->fw_build_num, params->fw_build_ww, + params->fw_build_yy); + + return (num == params->fw_build_num && + ww == params->fw_build_ww && + yy == params->fw_build_yy); + } + + fw_ptr += sizeof(*cmd) + cmd->plen; + } + + return false; +} + int btintel_download_firmware(struct hci_dev *hdev, + struct intel_version *ver, const struct firmware *fw, u32 *boot_param) { int err; + /* SfP and WsP don't seem to update the firmware version on file + * so version checking is currently not possible. + */ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + /* Skip version checking */ + break; + default: + /* Skip reading firmware file version in bootloader mode */ + if (ver->fw_variant == 0x06) + break; + + /* Skip download if firmware has the same version */ + if (btintel_firmware_version(hdev, ver->fw_build_num, + ver->fw_build_ww, ver->fw_build_yy, + fw, boot_param)) { + bt_dev_info(hdev, "Firmware already loaded"); + /* Return -EALREADY to indicate that the firmware has + * already been loaded. + */ + return -EALREADY; + } + } + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * If the firmware version has changed that means it needs to be reset + * to bootloader when operational so the new firmware can be loaded. + */ + if (ver->fw_variant == 0x23) + return -EINVAL; + err = btintel_sfi_rsa_header_secure_send(hdev, fw); if (err) return err; - return btintel_download_firmware_payload(hdev, fw, boot_param, - RSA_HEADER_LEN); + return btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN); } EXPORT_SYMBOL_GPL(btintel_download_firmware); int btintel_download_firmware_newgen(struct hci_dev *hdev, + struct intel_version_tlv *ver, const struct firmware *fw, u32 *boot_param, u8 hw_variant, u8 sbe_type) { int err; u32 css_header_ver; + /* Skip reading firmware file version in bootloader mode */ + if (ver->img_type != 0x01) { + /* Skip download if firmware has the same version */ + if (btintel_firmware_version(hdev, ver->min_fw_build_nn, + ver->min_fw_build_cw, + ver->min_fw_build_yy, + fw, boot_param)) { + bt_dev_info(hdev, "Firmware already loaded"); + /* Return -EALREADY to indicate that firmware has + * already been loaded. + */ + return -EALREADY; + } + } + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. 
The value 0x01 identifies + * the bootloader and the value 0x03 identifies the operational + * firmware. + * + * If the firmware version has changed that means it needs to be reset + * to bootloader when operational so the new firmware can be loaded. + */ + if (ver->img_type == 0x03) + return -EINVAL; + /* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support * only RSA secure boot engine. Hence, the corresponding sfi file will * have RSA header of 644 bytes followed by Command Buffer. @@ -947,7 +1129,7 @@ int btintel_download_firmware_newgen(struct hci_dev *hdev, if (err) return err; - err = btintel_download_firmware_payload(hdev, fw, boot_param, RSA_HEADER_LEN); + err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN); if (err) return err; } else if (hw_variant >= 0x17) { @@ -968,7 +1150,6 @@ int btintel_download_firmware_newgen(struct hci_dev *hdev, return err; err = btintel_download_firmware_payload(hdev, fw, - boot_param, RSA_HEADER_LEN + ECDSA_HEADER_LEN); if (err) return err; @@ -978,7 +1159,6 @@ int btintel_download_firmware_newgen(struct hci_dev *hdev, return err; err = btintel_download_firmware_payload(hdev, fw, - boot_param, RSA_HEADER_LEN + ECDSA_HEADER_LEN); if (err) return err; diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 6511b091caf5..d184064a5e7c 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -148,8 +148,8 @@ int btintel_set_diag(struct hci_dev *hdev, bool enable); int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable); void btintel_hw_error(struct hci_dev *hdev, u8 code); -void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver); -void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version); +int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver); +int btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version); int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, const void *param); int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name); @@ -163,9 +163,10 @@ struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param); int btintel_read_boot_params(struct hci_dev *hdev, struct intel_boot_params *params); -int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw, - u32 *boot_param); +int btintel_download_firmware(struct hci_dev *dev, struct intel_version *ver, + const struct firmware *fw, u32 *boot_param); int btintel_download_firmware_newgen(struct hci_dev *hdev, + struct intel_version_tlv *ver, const struct firmware *fw, u32 *boot_param, u8 hw_variant, u8 sbe_type); @@ -210,14 +211,16 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) { } -static inline void btintel_version_info(struct hci_dev *hdev, - struct intel_version *ver) +static inline int btintel_version_info(struct hci_dev *hdev, + struct intel_version *ver) { + return -EOPNOTSUPP; } -static inline void btintel_version_info_tlv(struct hci_dev *hdev, - struct intel_version_tlv *version) +static inline int btintel_version_info_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version) { + return -EOPNOTSUPP; } static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5cbfbd948f67..5d603ef39bad 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -399,7 +399,9 @@ static 
const struct usb_device_id blacklist_table[] = { /* MediaTek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01), - .driver_info = BTUSB_MEDIATEK }, + .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7615E Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK}, @@ -455,6 +457,8 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0xc123), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, @@ -2400,7 +2404,7 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return -EILSEQ; } -static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, +static int btusb_setup_intel_new_get_fw_name(struct intel_version *ver, struct intel_boot_params *params, char *fw_name, size_t len, const char *suffix) @@ -2424,9 +2428,10 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, suffix); break; default: - return false; + return -EINVAL; } - return true; + + return 0; } static void btusb_setup_intel_newgen_get_fw_name(const struct intel_version_tlv *ver_tlv, @@ -2444,6 +2449,44 @@ static void btusb_setup_intel_newgen_get_fw_name(const struct intel_version_tlv suffix); } +static int btusb_download_wait(struct hci_dev *hdev, ktime_t calltime, int msec) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + ktime_t delta, rettime; + unsigned long long duration; + int err; + + set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); + + bt_dev_info(hdev, "Waiting for firmware download to complete"); + + err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(msec)); + if (err == -EINTR) { + bt_dev_err(hdev, "Firmware loading interrupted"); + return err; + } + + if (err) { + bt_dev_err(hdev, "Firmware loading timeout"); + return -ETIMEDOUT; + } + + if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { + bt_dev_err(hdev, "Firmware loading failed"); + return -ENOEXEC; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); + + return 0; +} + static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, struct intel_version_tlv *ver, u32 *boot_param) @@ -2452,19 +2495,11 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, char fwname[64]; int err; struct btusb_data *data = hci_get_drvdata(hdev); + ktime_t calltime; if (!ver || !boot_param) return -EINVAL; - /* The hardware platform number has a fixed value of 0x37 and - * for now only accept this single value. - */ - if (INTEL_HW_PLATFORM(ver->cnvi_bt) != 0x37) { - bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", - INTEL_HW_PLATFORM(ver->cnvi_bt)); - return -EINVAL; - } - /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. 
The value 0x03 identifies * the bootloader and the value 0x23 identifies the operational @@ -2481,50 +2516,6 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, if (ver->img_type == 0x03) { clear_bit(BTUSB_BOOTLOADER, &data->flags); btintel_check_bdaddr(hdev); - return 0; - } - - /* Check for supported iBT hardware variants of this firmware - * loading method. - * - * This check has been put in place to ensure correct forward - * compatibility options when newer hardware variants come along. - */ - switch (INTEL_HW_VARIANT(ver->cnvi_bt)) { - case 0x17: /* TyP */ - case 0x18: /* Slr */ - case 0x19: /* Slr-F */ - break; - default: - bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", - INTEL_HW_VARIANT(ver->cnvi_bt)); - return -EINVAL; - } - - /* If the device is not in bootloader mode, then the only possible - * choice is to return an error and abort the device initialization. - */ - if (ver->img_type != 0x01) { - bt_dev_err(hdev, "Unsupported Intel firmware variant (0x%x)", - ver->img_type); - return -ENODEV; - } - - /* It is required that every single firmware fragment is acknowledged - * with a command complete event. If the boot parameters indicate - * that this bootloader does not send them, then abort the setup. - */ - if (ver->limited_cce != 0x00) { - bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)", - ver->limited_cce); - return -EINVAL; - } - - /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */ - if (ver->sbe_type > 0x01) { - bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)", - ver->sbe_type); - return -EINVAL; } /* If the OTP has no valid Bluetooth device address, then there will @@ -2538,7 +2529,8 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi"); err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { - bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err); + bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)", + fwname, err); return err; } @@ -2551,22 +2543,28 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, goto done; } + calltime = ktime_get(); + set_bit(BTUSB_DOWNLOADING, &data->flags); /* Start firmware downloading and get boot parameter */ - err = btintel_download_firmware_newgen(hdev, fw, boot_param, + err = btintel_download_firmware_newgen(hdev, ver, fw, boot_param, INTEL_HW_VARIANT(ver->cnvi_bt), ver->sbe_type); if (err < 0) { + if (err == -EALREADY) { + /* Firmware has already been loaded */ + set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); + err = 0; + goto done; + } + /* When FW download fails, send Intel Reset to retry * FW download. */ btintel_reset_to_bootloader(hdev); goto done; } - set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); - - bt_dev_info(hdev, "Waiting for firmware download to complete"); /* Before switching the device into operational mode and with that * booting the loaded firmware, wait for the bootloader notification @@ -2579,26 +2577,9 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, * and thus just timeout if that happens and fail the setup * of this device. 
*/ - err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, - TASK_INTERRUPTIBLE, - msecs_to_jiffies(5000)); - if (err == -EINTR) { - bt_dev_err(hdev, "Firmware loading interrupted"); - goto done; - } - - if (err) { - bt_dev_err(hdev, "Firmware loading timeout"); - err = -ETIMEDOUT; + err = btusb_download_wait(hdev, calltime, 5000); + if (err == -ETIMEDOUT) btintel_reset_to_bootloader(hdev); - goto done; - } - - if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { - bt_dev_err(hdev, "Firmware loading failed"); - err = -ENOEXEC; - goto done; - } done: release_firmware(fw); @@ -2614,41 +2595,11 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev, char fwname[64]; int err; struct btusb_data *data = hci_get_drvdata(hdev); + ktime_t calltime; if (!ver || !params) return -EINVAL; - /* The hardware platform number has a fixed value of 0x37 and - * for now only accept this single value. - */ - if (ver->hw_platform != 0x37) { - bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", - ver->hw_platform); - return -EINVAL; - } - - /* Check for supported iBT hardware variants of this firmware - * loading method. - * - * This check has been put in place to ensure correct forward - * compatibility options when newer hardware variants come along. - */ - switch (ver->hw_variant) { - case 0x0b: /* SfP */ - case 0x0c: /* WsP */ - case 0x11: /* JfP */ - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* CcP */ - break; - default: - bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", - ver->hw_variant); - return -EINVAL; - } - - btintel_version_info(hdev, ver); - /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies * the bootloader and the value 0x23 identifies the operational @@ -2665,16 +2616,18 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev, if (ver->fw_variant == 0x23) { clear_bit(BTUSB_BOOTLOADER, &data->flags); btintel_check_bdaddr(hdev); - return 0; - } - /* If the device is not in bootloader mode, then the only possible - * choice is to return an error and abort the device initialization. - */ - if (ver->fw_variant != 0x06) { - bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", - ver->fw_variant); - return -ENODEV; + /* SfP and WsP don't seem to update the firmware version on file + * so version checking is currently possible. + */ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + return 0; + } + + /* Proceed to download to check if the version matches */ + goto download; } /* Read the secure boot parameters to identify the operating @@ -2702,6 +2655,7 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev, set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } +download: /* With this Intel bootloader only the hardware variant and device * revision information are used to select the right firmware for SfP * and WsP. 
@@ -2725,14 +2679,15 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev, */ err = btusb_setup_intel_new_get_fw_name(ver, params, fwname, sizeof(fwname), "sfi"); - if (!err) { + if (err < 0) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { - bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err); + bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)", + fwname, err); return err; } @@ -2745,20 +2700,26 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev, goto done; } + calltime = ktime_get(); + set_bit(BTUSB_DOWNLOADING, &data->flags); /* Start firmware downloading and get boot parameter */ - err = btintel_download_firmware(hdev, fw, boot_param); + err = btintel_download_firmware(hdev, ver, fw, boot_param); if (err < 0) { + if (err == -EALREADY) { + /* Firmware has already been loaded */ + set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); + err = 0; + goto done; + } + /* When FW download fails, send Intel Reset to retry * FW download. */ btintel_reset_to_bootloader(hdev); goto done; } - set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); - - bt_dev_info(hdev, "Waiting for firmware download to complete"); /* Before switching the device into operational mode and with that * booting the loaded firmware, wait for the bootloader notification @@ -2771,29 +2732,74 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev, * and thus just timeout if that happens and fail the setup * of this device. */ - err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, + err = btusb_download_wait(hdev, calltime, 5000); + if (err == -ETIMEDOUT) + btintel_reset_to_bootloader(hdev); + +done: + release_firmware(fw); + return err; +} + +static int btusb_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + ktime_t delta, rettime; + unsigned long long duration; + int err; + + bt_dev_info(hdev, "Waiting for device to boot"); + + err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, TASK_INTERRUPTIBLE, - msecs_to_jiffies(5000)); + msecs_to_jiffies(msec)); if (err == -EINTR) { - bt_dev_err(hdev, "Firmware loading interrupted"); - goto done; + bt_dev_err(hdev, "Device boot interrupted"); + return -EINTR; } if (err) { - bt_dev_err(hdev, "Firmware loading timeout"); - err = -ETIMEDOUT; - btintel_reset_to_bootloader(hdev); - goto done; + bt_dev_err(hdev, "Device boot timeout"); + return -ETIMEDOUT; } - if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { - bt_dev_err(hdev, "Firmware loading failed"); - err = -ENOEXEC; - goto done; + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device booted in %llu usecs", duration); + + return 0; +} + +static int btusb_intel_boot(struct hci_dev *hdev, u32 boot_addr) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + ktime_t calltime; + int err; + + calltime = ktime_get(); + + set_bit(BTUSB_BOOTING, &data->flags); + + err = btintel_send_intel_reset(hdev, boot_addr); + if (err) { + bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err); + btintel_reset_to_bootloader(hdev); + return err; } -done: - release_firmware(fw); + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending bootup notification. + * + * Booting into operational firmware should not take longer than + * 1 second. 
However if that happens, then just fail the setup + * since something went wrong. + */ + err = btusb_boot_wait(hdev, calltime, 1000); + if (err == -ETIMEDOUT) + btintel_reset_to_bootloader(hdev); + return err; } @@ -2804,8 +2810,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) struct intel_boot_params params; u32 boot_param; char ddcname[64]; - ktime_t calltime, delta, rettime; - unsigned long long duration; int err; struct intel_debug_features features; @@ -2817,8 +2821,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) */ boot_param = 0x00000000; - calltime = ktime_get(); - /* Read the Intel version information to determine if the device * is in bootloader mode or if it already has operational firmware * loaded. @@ -2830,6 +2832,10 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) return err; } + err = btintel_version_info(hdev, &ver); + if (err) + return err; + err = btusb_intel_download_firmware(hdev, &ver, ¶ms, &boot_param); if (err) return err; @@ -2838,59 +2844,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) if (ver.fw_variant == 0x23) goto finish; - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - duration = (unsigned long long) ktime_to_ns(delta) >> 10; - - bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); - - calltime = ktime_get(); - - set_bit(BTUSB_BOOTING, &data->flags); - - err = btintel_send_intel_reset(hdev, boot_param); - if (err) { - bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err); - btintel_reset_to_bootloader(hdev); + err = btusb_intel_boot(hdev, boot_param); + if (err) return err; - } - - /* The bootloader will not indicate when the device is ready. This - * is done by the operational firmware sending bootup notification. - * - * Booting into operational firmware should not take longer than - * 1 second. However if that happens, then just fail the setup - * since something went wrong. - */ - bt_dev_info(hdev, "Waiting for device to boot"); - - err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, - TASK_INTERRUPTIBLE, - msecs_to_jiffies(1000)); - - if (err == -EINTR) { - bt_dev_err(hdev, "Device boot interrupted"); - return -EINTR; - } - - if (err) { - bt_dev_err(hdev, "Device boot timeout"); - btintel_reset_to_bootloader(hdev); - return -ETIMEDOUT; - } - - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - duration = (unsigned long long) ktime_to_ns(delta) >> 10; - - bt_dev_info(hdev, "Device booted in %llu usecs", duration); clear_bit(BTUSB_BOOTLOADER, &data->flags); err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, ddcname, sizeof(ddcname), "ddc"); - if (!err) { + if (err < 0) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); } else { /* Once the device is running in operational mode, it needs to @@ -2947,8 +2910,6 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev) struct btusb_data *data = hci_get_drvdata(hdev); u32 boot_param; char ddcname[64]; - ktime_t calltime, delta, rettime; - unsigned long long duration; int err; struct intel_debug_features features; struct intel_version_tlv version; @@ -2961,8 +2922,6 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev) */ boot_param = 0x00000000; - calltime = ktime_get(); - /* Read the Intel version information to determine if the device * is in bootloader mode or if it already has operational firmware * loaded. 
@@ -2974,7 +2933,9 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev) return err; } - btintel_version_info_tlv(hdev, &version); + err = btintel_version_info_tlv(hdev, &version); + if (err) + return err; err = btusb_intel_download_firmware_newgen(hdev, &version, &boot_param); if (err) @@ -2984,52 +2945,9 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev) if (version.img_type == 0x03) goto finish; - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - duration = (unsigned long long)ktime_to_ns(delta) >> 10; - - bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); - - calltime = ktime_get(); - - set_bit(BTUSB_BOOTING, &data->flags); - - err = btintel_send_intel_reset(hdev, boot_param); - if (err) { - bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err); - btintel_reset_to_bootloader(hdev); + err = btusb_intel_boot(hdev, boot_param); + if (err) return err; - } - - /* The bootloader will not indicate when the device is ready. This - * is done by the operational firmware sending bootup notification. - * - * Booting into operational firmware should not take longer than - * 1 second. However if that happens, then just fail the setup - * since something went wrong. - */ - bt_dev_info(hdev, "Waiting for device to boot"); - - err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, - TASK_INTERRUPTIBLE, - msecs_to_jiffies(1000)); - - if (err == -EINTR) { - bt_dev_err(hdev, "Device boot interrupted"); - return -EINTR; - } - - if (err) { - bt_dev_err(hdev, "Device boot timeout"); - btintel_reset_to_bootloader(hdev); - return -ETIMEDOUT; - } - - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - duration = (unsigned long long)ktime_to_ns(delta) >> 10; - - bt_dev_info(hdev, "Device booted in %llu usecs", duration); clear_bit(BTUSB_BOOTLOADER, &data->flags); @@ -3495,7 +3413,7 @@ static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwnam fw_ptr = fw->data; fw_bin_ptr = fw_ptr; globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE); - section_num = globaldesc->section_num; + section_num = le32_to_cpu(globaldesc->section_num); for (i = 0; i < section_num; i++) { first_block = 1; @@ -3503,8 +3421,8 @@ static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwnam sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i); - section_offset = sectionmap->secoffset; - dl_size = sectionmap->bin_info_spec.dlsize; + section_offset = le32_to_cpu(sectionmap->secoffset); + dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize); if (dl_size > 0) { retry = 20; @@ -3740,7 +3658,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev) int err, status; u32 dev_id; char fw_bin_name[64]; - u32 fw_version; + u32 fw_version = 0; u8 param; calltime = ktime_get(); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 3764ceb6fa0d..3cd57fc56ade 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -68,6 +68,8 @@ struct bcm_device_data { * deassert = Bluetooth device may sleep when sleep criteria are met * @shutdown: BT_REG_ON pin, * power up or power down Bluetooth device internal regulators + * @reset: BT_RST_N pin, + * active low resets the Bluetooth logic core * @set_device_wakeup: callback to toggle BT_WAKE pin * either by accessing @device_wakeup or by calling @btlp * @set_shutdown: callback to toggle BT_REG_ON pin @@ -101,6 +103,7 @@ struct bcm_device { const char *name; struct 
gpio_desc *device_wakeup; struct gpio_desc *shutdown; + struct gpio_desc *reset; int (*set_device_wakeup)(struct bcm_device *, bool); int (*set_shutdown)(struct bcm_device *, bool); #ifdef CONFIG_ACPI @@ -985,6 +988,15 @@ static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake) static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered) { gpiod_set_value_cansleep(dev->shutdown, powered); + if (dev->reset) + /* + * The reset line is asserted on powerdown and deasserted + * on poweron so the inverse of powered is used. Notice + * that the GPIO line BT_RST_N needs to be specified as + * active low in the device tree or similar system + * description. + */ + gpiod_set_value_cansleep(dev->reset, !powered); return 0; } @@ -1050,6 +1062,11 @@ static int bcm_get_resources(struct bcm_device *dev) if (IS_ERR(dev->shutdown)) return PTR_ERR(dev->shutdown); + dev->reset = devm_gpiod_get_optional(dev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(dev->reset)) + return PTR_ERR(dev->reset); + dev->set_device_wakeup = bcm_gpio_set_device_wakeup; dev->set_shutdown = bcm_gpio_set_shutdown; @@ -1482,6 +1499,8 @@ static struct bcm_device_data bcm43438_device_data = { static const struct of_device_id bcm_bluetooth_of_match[] = { { .compatible = "brcm,bcm20702a1" }, { .compatible = "brcm,bcm4329-bt" }, + { .compatible = "brcm,bcm4330-bt" }, + { .compatible = "brcm,bcm4334-bt" }, { .compatible = "brcm,bcm4345c5" }, { .compatible = "brcm,bcm4330-bt" }, { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index b20a40fab83e..7249b91d9b91 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -735,7 +735,7 @@ static int intel_setup(struct hci_uart *hu) set_bit(STATE_DOWNLOADING, &intel->flags); /* Start firmware downloading and get boot parameter */ - err = btintel_download_firmware(hdev, fw, &boot_param); + err = btintel_download_firmware(hdev, &ver, fw, &boot_param); if (err < 0) goto done; @@ -784,7 +784,10 @@ static int intel_setup(struct hci_uart *hu) done: release_firmware(fw); - if (err < 0) + /* Check if there was an error and if is not -EALREADY which means the + * firmware has already been loaded. + */ + if (err < 0 && err != -EALREADY) return err; /* We need to restore the default speed before Intel reset */ diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index de36af63e182..0a0056912d51 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1066,7 +1066,7 @@ static void qca_controller_memdump(struct work_struct *work) * packets in the buffer. */ /* For QCA6390, controller does not lost packets but - * sequence number field of packat sometimes has error + * sequence number field of packet sometimes has error * bits, so skip this checking for missing packet. */ while ((seq_no > qca_memdump->current_seq_no + 1) && @@ -1571,6 +1571,20 @@ static void qca_cmd_timeout(struct hci_dev *hdev) mutex_unlock(&qca->hci_memdump_lock); } +static bool qca_prevent_wake(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + bool wakeup; + + /* UART driver handles the interrupt from BT SoC.So we need to use + * device handle of UART driver to get the status of device may wakeup. 
+ */ + wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent); + bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup); + + return !wakeup; +} + static int qca_wcn3990_init(struct hci_uart *hu) { struct qca_serdev *qcadev; @@ -1721,6 +1735,7 @@ retry: qca_debugfs_init(hdev); hu->hdev->hw_error = qca_hw_error; hu->hdev->cmd_timeout = qca_cmd_timeout; + hu->hdev->prevent_wake = qca_prevent_wake; } else if (ret == -ENOENT) { /* No patch/nvm-config found, run with original fw/config */ set_bit(QCA_ROM_FW, &qca->flags); diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c new file mode 100644 index 000000000000..c804db7e90f8 --- /dev/null +++ b/drivers/bluetooth/virtio_bt.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/module.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/skbuff.h> + +#include <uapi/linux/virtio_ids.h> +#include <uapi/linux/virtio_bt.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#define VERSION "0.1" + +enum { + VIRTBT_VQ_TX, + VIRTBT_VQ_RX, + VIRTBT_NUM_VQS, +}; + +struct virtio_bluetooth { + struct virtio_device *vdev; + struct virtqueue *vqs[VIRTBT_NUM_VQS]; + struct work_struct rx; + struct hci_dev *hdev; +}; + +static int virtbt_add_inbuf(struct virtio_bluetooth *vbt) +{ + struct virtqueue *vq = vbt->vqs[VIRTBT_VQ_RX]; + struct scatterlist sg[1]; + struct sk_buff *skb; + int err; + + skb = alloc_skb(1000, GFP_KERNEL); + sg_init_one(sg, skb->data, 1000); + + err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL); + if (err < 0) { + kfree_skb(skb); + return err; + } + + return 0; +} + +static int virtbt_open(struct hci_dev *hdev) +{ + struct virtio_bluetooth *vbt = hci_get_drvdata(hdev); + + if (virtbt_add_inbuf(vbt) < 0) + return -EIO; + + virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); + return 0; +} + +static int virtbt_close(struct hci_dev *hdev) +{ + struct virtio_bluetooth *vbt = hci_get_drvdata(hdev); + int i; + + cancel_work_sync(&vbt->rx); + + for (i = 0; i < ARRAY_SIZE(vbt->vqs); i++) { + struct virtqueue *vq = vbt->vqs[i]; + struct sk_buff *skb; + + while ((skb = virtqueue_detach_unused_buf(vq))) + kfree_skb(skb); + } + + return 0; +} + +static int virtbt_flush(struct hci_dev *hdev) +{ + return 0; +} + +static int virtbt_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct virtio_bluetooth *vbt = hci_get_drvdata(hdev); + struct scatterlist sg[1]; + int err; + + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + sg_init_one(sg, skb->data, skb->len); + err = virtqueue_add_outbuf(vbt->vqs[VIRTBT_VQ_TX], sg, 1, skb, + GFP_KERNEL); + if (err) { + kfree_skb(skb); + return err; + } + + virtqueue_kick(vbt->vqs[VIRTBT_VQ_TX]); + return 0; +} + +static int virtbt_setup_zephyr(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Build Information */ + skb = __hci_cmd_sync(hdev, 0xfc08, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); + + hci_set_fw_info(hdev, "%s", skb->data + 1); + + kfree_skb(skb); + return 0; +} + +static int virtbt_set_bdaddr_zephyr(struct hci_dev *hdev, + const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + + /* Write BD_ADDR */ + skb = __hci_cmd_sync(hdev, 0xfc06, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int virtbt_setup_intel(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Intel Read Version */ + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); 
+ if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int virtbt_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + + /* Intel Write BD Address */ + skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int virtbt_setup_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read ROM Version */ + skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "ROM version %u", *((__u8 *) (skb->data + 1))); + + kfree_skb(skb); + return 0; +} + +static int virtbt_shutdown_generic(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Reset */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb) +{ + __u8 pkt_type; + + pkt_type = *((__u8 *) skb->data); + skb_pull(skb, 1); + + switch (pkt_type) { + case HCI_EVENT_PKT: + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + hci_skb_pkt_type(skb) = pkt_type; + hci_recv_frame(vbt->hdev, skb); + break; + } +} + +static void virtbt_rx_work(struct work_struct *work) +{ + struct virtio_bluetooth *vbt = container_of(work, + struct virtio_bluetooth, rx); + struct sk_buff *skb; + unsigned int len; + + skb = virtqueue_get_buf(vbt->vqs[VIRTBT_VQ_RX], &len); + if (!skb) + return; + + skb->len = len; + virtbt_rx_handle(vbt, skb); + + if (virtbt_add_inbuf(vbt) < 0) + return; + + virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); +} + +static void virtbt_tx_done(struct virtqueue *vq) +{ + struct sk_buff *skb; + unsigned int len; + + while ((skb = virtqueue_get_buf(vq, &len))) + kfree_skb(skb); +} + +static void virtbt_rx_done(struct virtqueue *vq) +{ + struct virtio_bluetooth *vbt = vq->vdev->priv; + + schedule_work(&vbt->rx); +} + +static int virtbt_probe(struct virtio_device *vdev) +{ + vq_callback_t *callbacks[VIRTBT_NUM_VQS] = { + [VIRTBT_VQ_TX] = virtbt_tx_done, + [VIRTBT_VQ_RX] = virtbt_rx_done, + }; + const char *names[VIRTBT_NUM_VQS] = { + [VIRTBT_VQ_TX] = "tx", + [VIRTBT_VQ_RX] = "rx", + }; + struct virtio_bluetooth *vbt; + struct hci_dev *hdev; + int err; + __u8 type; + + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) + return -ENODEV; + + type = virtio_cread8(vdev, offsetof(struct virtio_bt_config, type)); + + switch (type) { + case VIRTIO_BT_CONFIG_TYPE_PRIMARY: + case VIRTIO_BT_CONFIG_TYPE_AMP: + break; + default: + return -EINVAL; + } + + vbt = kzalloc(sizeof(*vbt), GFP_KERNEL); + if (!vbt) + return -ENOMEM; + + vdev->priv = vbt; + vbt->vdev = vdev; + + INIT_WORK(&vbt->rx, virtbt_rx_work); + + err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, callbacks, + names, NULL); + if (err) + return err; + + hdev = hci_alloc_dev(); + if (!hdev) { + err = -ENOMEM; + goto failed; + } + + vbt->hdev = hdev; + + hdev->bus = HCI_VIRTIO; + hdev->dev_type = type; + hci_set_drvdata(hdev, vbt); + + hdev->open = virtbt_open; + hdev->close = virtbt_close; + hdev->flush = virtbt_flush; + hdev->send = virtbt_send_frame; + + if (virtio_has_feature(vdev, VIRTIO_BT_F_VND_HCI)) { + __u16 vendor; + + virtio_cread(vdev, struct virtio_bt_config, vendor, &vendor); + + switch (vendor) { + case VIRTIO_BT_CONFIG_VENDOR_ZEPHYR: + hdev->manufacturer = 1521; + hdev->setup = virtbt_setup_zephyr; + hdev->shutdown = virtbt_shutdown_generic; + hdev->set_bdaddr = 
virtbt_set_bdaddr_zephyr; + break; + + case VIRTIO_BT_CONFIG_VENDOR_INTEL: + hdev->manufacturer = 2; + hdev->setup = virtbt_setup_intel; + hdev->shutdown = virtbt_shutdown_generic; + hdev->set_bdaddr = virtbt_set_bdaddr_intel; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + break; + + case VIRTIO_BT_CONFIG_VENDOR_REALTEK: + hdev->manufacturer = 93; + hdev->setup = virtbt_setup_realtek; + hdev->shutdown = virtbt_shutdown_generic; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + break; + } + } + + if (virtio_has_feature(vdev, VIRTIO_BT_F_MSFT_EXT)) { + __u16 msft_opcode; + + virtio_cread(vdev, struct virtio_bt_config, + msft_opcode, &msft_opcode); + + hci_set_msft_opcode(hdev, msft_opcode); + } + + if (virtio_has_feature(vdev, VIRTIO_BT_F_AOSP_EXT)) + hci_set_aosp_capable(hdev); + + if (hci_register_dev(hdev) < 0) { + hci_free_dev(hdev); + err = -EBUSY; + goto failed; + } + + return 0; + +failed: + vdev->config->del_vqs(vdev); + return err; +} + +static void virtbt_remove(struct virtio_device *vdev) +{ + struct virtio_bluetooth *vbt = vdev->priv; + struct hci_dev *hdev = vbt->hdev; + + hci_unregister_dev(hdev); + vdev->config->reset(vdev); + + hci_free_dev(hdev); + vbt->hdev = NULL; + + vdev->config->del_vqs(vdev); + kfree(vbt); +} + +static struct virtio_device_id virtbt_table[] = { + { VIRTIO_ID_BT, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +MODULE_DEVICE_TABLE(virtio, virtbt_table); + +static const unsigned int virtbt_features[] = { + VIRTIO_BT_F_VND_HCI, + VIRTIO_BT_F_MSFT_EXT, + VIRTIO_BT_F_AOSP_EXT, +}; + +static struct virtio_driver virtbt_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .feature_table = virtbt_features, + .feature_table_size = ARRAY_SIZE(virtbt_features), + .id_table = virtbt_table, + .probe = virtbt_probe, + .remove = virtbt_remove, +}; + +module_virtio_driver(virtbt_driver); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_DESCRIPTION("Generic Bluetooth VIRTIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 25da0b05b4e2..01370d9a871a 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -879,7 +879,7 @@ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_for_match(esw, + mlx5_eswitch_get_vport_metadata_for_match(rep->esw, rep->vport)); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c index 9164cc069ad4..db5de720bb12 100644 --- a/drivers/infiniband/hw/mlx5/ib_rep.c +++ b/drivers/infiniband/hw/mlx5/ib_rep.c @@ -20,7 +20,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) rep->rep_data[REP_IB].priv = ibdev; write_lock(&ibdev->port[vport_index].roce.netdev_lock); ibdev->port[vport_index].roce.netdev = - mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport); + mlx5_ib_get_rep_netdev(rep->esw, rep->vport); write_unlock(&ibdev->port[vport_index].roce.netdev_lock); return 0; @@ -123,8 +123,7 @@ struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, rep = dev->port[port - 1].rep; - return 
mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport, - sq->base.mqp.qpn); + return mlx5_eswitch_add_send_to_vport_rule(esw, rep, sq->base.mqp.qpn); } static int mlx5r_rep_probe(struct auxiliary_device *adev, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 0d69a697d75f..7a7f6ccd02a5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -126,7 +126,6 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, struct net_device *ndev, u8 *port_num) { - struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; struct net_device *rep_ndev; struct mlx5_ib_port *port; int i; @@ -137,7 +136,7 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, continue; read_lock(&port->roce.netdev_lock); - rep_ndev = mlx5_ib_get_rep_netdev(esw, + rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw, port->rep->vport); if (rep_ndev == ndev) { read_unlock(&port->roce.netdev_lock); diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 7013a3f08429..4f7eaa17fb27 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -173,13 +173,13 @@ #define MAX_FRAGS (32 * MAX_CARDS) static LIST_HEAD(HFClist); -static spinlock_t HFClock; /* global hfc list lock */ +static DEFINE_SPINLOCK(HFClock); /* global hfc list lock */ static void ph_state_change(struct dchannel *); static struct hfc_multi *syncmaster; static int plxsd_master; /* if we have a master card (yet) */ -static spinlock_t plx_lock; /* may not acquire other lock inside */ +static DEFINE_SPINLOCK(plx_lock); /* may not acquire other lock inside */ #define TYP_E1 1 #define TYP_4S 4 @@ -2748,8 +2748,6 @@ hfcmulti_interrupt(int intno, void *dev_id) if (hc->ctype != HFC_TYPE_E1) ph_state_irq(hc, r_irq_statech); } - if (status & V_EXT_IRQSTA) - ; /* external IRQ */ if (status & V_LOST_STA) { /* LOST IRQ */ HFC_outb(hc, R_INC_RES_FIFO, V_RES_LOST); /* clear irq! 
*/ @@ -5482,9 +5480,6 @@ HFCmulti_init(void) printk(KERN_DEBUG "%s: IRQ_DEBUG IS ENABLED!\n", __func__); #endif - spin_lock_init(&HFClock); - spin_lock_init(&plx_lock); - if (debug & DEBUG_HFCMULTI_INIT) printk(KERN_DEBUG "%s: init entered\n", __func__); diff --git a/drivers/isdn/hardware/mISDN/iohelper.h b/drivers/isdn/hardware/mISDN/iohelper.h index b2b2bde8edba..c81f7aba4b57 100644 --- a/drivers/isdn/hardware/mISDN/iohelper.h +++ b/drivers/isdn/hardware/mISDN/iohelper.h @@ -13,14 +13,14 @@ #ifndef _IOHELPER_H #define _IOHELPER_H -typedef u8 (read_reg_func)(void *hwp, u8 offset); - typedef void (write_reg_func)(void *hwp, u8 offset, u8 value); - typedef void (fifo_func)(void *hwp, u8 offset, u8 *datap, int size); +typedef u8 (read_reg_func)(void *hwp, u8 offset); +typedef void (write_reg_func)(void *hwp, u8 offset, u8 value); +typedef void (fifo_func)(void *hwp, u8 offset, u8 *datap, int size); - struct _ioport { - u32 port; - u32 ale; - }; +struct _ioport { + u32 port; + u32 ale; +}; #define IOFUNC_IO(name, hws, ap) \ static u8 Read##name##_IO(void *p, u8 off) { \ diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 038e72a84b33..386084530c2f 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -176,9 +176,9 @@ MODULE_LICENSE("GPL"); /*int spinnest = 0;*/ -spinlock_t dsp_lock; /* global dsp lock */ -struct list_head dsp_ilist; -struct list_head conf_ilist; +DEFINE_SPINLOCK(dsp_lock); /* global dsp lock */ +LIST_HEAD(dsp_ilist); +LIST_HEAD(conf_ilist); int dsp_debug; int dsp_options; int dsp_poll, dsp_tics; @@ -953,7 +953,6 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct dsp *dsp = container_of(ch, struct dsp, ch); u_long flags; - int err = 0; if (debug & DEBUG_DSP_CTRL) printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); @@ -998,7 +997,7 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) module_put(THIS_MODULE); break; } - return err; + return 0; } static void @@ -1170,10 +1169,6 @@ static int __init dsp_init(void) printk(KERN_INFO "mISDN_dsp: DSP clocks every %d samples. This equals " "%d jiffies.\n", dsp_poll, dsp_tics); - spin_lock_init(&dsp_lock); - INIT_LIST_HEAD(&dsp_ilist); - INIT_LIST_HEAD(&conf_ilist); - /* init conversion tables */ dsp_audio_generate_law_tables(); dsp_silence = (dsp_options & DSP_OPT_ULAW) ? 0xff : 0x2a; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b57dcb834594..2c40412466e6 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -200,7 +200,7 @@ The complete socket opening and closing is done by a thread. When the thread opened a socket, the hc->socket descriptor is set. Whenever a - packet shall be sent to the socket, the hc->socket must be checked wheter not + packet shall be sent to the socket, the hc->socket must be checked whether not NULL. To prevent change in socket descriptor, the hc->socket_lock must be used. To change the socket, a recall of l1oip_socket_open() will safely kill the socket process and create a new one. @@ -229,8 +229,8 @@ static const char *l1oip_revision = "2.00"; static int l1oip_cnt; -static spinlock_t l1oip_lock; -static struct list_head l1oip_ilist; +static DEFINE_SPINLOCK(l1oip_lock); +static LIST_HEAD(l1oip_ilist); #define MAX_CARDS 16 static u_int type[MAX_CARDS]; @@ -1440,9 +1440,6 @@ l1oip_init(void) printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. 
%s\n", l1oip_revision); - INIT_LIST_HEAD(&l1oip_ilist); - spin_lock_init(&l1oip_lock); - if (l1oip_4bit_alloc(ulaw)) return -ENOMEM; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index bcd31f458d1a..74dc8e249faa 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -502,6 +502,8 @@ source "drivers/net/wan/Kconfig" source "drivers/net/ieee802154/Kconfig" +source "drivers/net/wwan/Kconfig" + config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN @@ -579,6 +581,7 @@ config NETDEVSIM depends on DEBUG_FS depends on INET depends on IPV6 || IPV6=n + depends on PSAMPLE || PSAMPLE=n select NET_DEVLINK help This driver is a developer testing tool and software model that can diff --git a/drivers/net/Makefile b/drivers/net/Makefile index f4990ff32fa4..7ffd2d03efaf 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -45,7 +45,7 @@ obj-$(CONFIG_ARCNET) += arcnet/ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ -obj-y += dsa/ +obj-$(CONFIG_NET_DSA) += dsa/ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ @@ -68,6 +68,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_IEEE802154) += ieee802154/ +obj-$(CONFIG_WWAN) += wwan/ obj-$(CONFIG_VMXNET3) += vmxnet3/ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 890c86e11bcc..df79e7370bcc 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -59,9 +59,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe) * look for EISA/PCI cards in addition to ISA cards). */ static struct devprobe2 isa_probes[] __initdata = { -#if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */ - {hp100_probe, 0}, -#endif #ifdef CONFIG_3C515 {tc515_probe, 0}, #endif diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 7511bca9c15e..edfad93e7b68 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -218,6 +218,7 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port) if (err < 0) return ERR_PTR(err); + udp_allow_gso(sock->sk); return sock; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c3091e00dd5f..3455f2cc13f2 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1098,7 +1098,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, * If @slave's permanent hw address is different both from its current * address and from @bond's address, then somewhere in the bond there's * a slave that has @slave's permanet address as its current address. - * We'll make sure that that slave no longer uses @slave's permanent address. + * We'll make sure that slave no longer uses @slave's permanent address. 
* * Caller must hold RTNL and no other locks */ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 74cbbb22470b..20bbda1b36e1 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -964,7 +964,7 @@ static bool bond_should_notify_peers(struct bonding *bond) } /** - * change_active_interface - change the active slave into the specified one + * bond_change_active_slave - change the active slave into the specified one * @bond: our bonding struct * @new_active: the new slave to make the active one * @@ -4391,9 +4391,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) int agg_id = 0; int ret = 0; -#ifdef CONFIG_LOCKDEP - WARN_ON(lockdep_is_held(&bond->mode_lock)); -#endif + might_sleep(); usable_slaves = kzalloc(struct_size(usable_slaves, arr, bond->slave_cnt), GFP_KERNEL); @@ -4406,7 +4404,9 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info ad_info; + spin_lock_bh(&bond->mode_lock); if (bond_3ad_get_active_agg_info(bond, &ad_info)) { + spin_unlock_bh(&bond->mode_lock); pr_debug("bond_3ad_get_active_agg_info failed\n"); /* No active aggragator means it's not safe to use * the previous array. @@ -4414,6 +4414,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) bond_reset_slave_arr(bond); goto out; } + spin_unlock_bh(&bond->mode_lock); agg_id = ad_info.aggregator_id; } bond_for_each_slave(bond, slave, iter) { diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 77d7c38bd435..c9d3604ae129 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -640,6 +640,15 @@ static void bond_opt_error_interpret(struct bonding *bond, netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n", opt->name); break; + case -ENODEV: + if (val && val->string) { + p = strchr(val->string, '\n'); + if (p) + *p = '\0'; + netdev_err(bond->dev, "option %s: interface %s does not exist!\n", + opt->name, val->string); + } + break; default: break; } diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 1c28eade6bec..e355d3974977 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -103,7 +103,7 @@ config CAN_FLEXCAN config CAN_GRCAN tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" - depends on OF && HAS_DMA + depends on OF && HAS_DMA && HAS_IOMEM help Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. 
Note that the driver supports little endian, even though little diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 6958830cb983..313793f6922d 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -132,7 +132,6 @@ /* For the high buffers we clear the interrupt bit and newdat */ #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT) - /* Receive setup of message objects */ #define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL) @@ -161,9 +160,7 @@ #define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) -/* - * Use IF1 for RX and IF2 for TX - */ +/* Use IF1 for RX and IF2 for TX */ #define IF_RX 0 #define IF_TX 1 @@ -173,9 +170,6 @@ /* Wait for ~1 sec for INIT bit */ #define INIT_WAIT_MS 1000 -/* napi related */ -#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM - /* c_can lec values */ enum c_can_lec_type { LEC_NO_ERROR = 0, @@ -189,8 +183,7 @@ enum c_can_lec_type { LEC_MASK = LEC_UNUSED, }; -/* - * c_can error types: +/* c_can error types: * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported */ enum c_can_bus_error_types { @@ -253,7 +246,6 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj udelay(1); } netdev_err(dev, "Updating object timed out\n"); - } static inline void c_can_object_get(struct net_device *dev, int iface, @@ -268,8 +260,7 @@ static inline void c_can_object_put(struct net_device *dev, int iface, c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj); } -/* - * Note: According to documentation clearing TXIE while MSGVAL is set +/* Note: According to documentation clearing TXIE while MSGVAL is set * is not allowed, but works nicely on C/DCAN. And that lowers the I/O * load significantly. */ @@ -285,8 +276,7 @@ static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj) { struct c_can_priv *priv = netdev_priv(dev); - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); + priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), 0); c_can_inval_tx_object(dev, iface, obj); } @@ -309,12 +299,11 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, if (!rtr) arb |= IF_ARB_TRANSMIT; - /* - * If we change the DIR bit, we need to invalidate the buffer + /* If we change the DIR bit, we need to invalidate the buffer * first, i.e. clear the MSGVAL flag in the arbiter. */ if (rtr != (bool)test_bit(idx, &priv->tx_dir)) { - u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + u32 obj = idx + priv->msg_obj_tx_first; c_can_inval_msg_object(dev, iface, obj); change_bit(idx, &priv->tx_dir); @@ -447,18 +436,16 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; - /* - * This is not a FIFO. C/D_CAN sends out the buffers + /* This is not a FIFO. C/D_CAN sends out the buffers * prioritized. The lowest buffer number wins. */ idx = fls(atomic_read(&priv->tx_active)); - obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + obj = idx + priv->msg_obj_tx_first; /* If this is the last buffer, stop the xmit queue */ - if (idx == C_CAN_MSG_OBJ_TX_NUM - 1) + if (idx == priv->msg_obj_tx_num - 1) netif_stop_queue(dev); - /* - * Store the message in the interface so we can call + /* Store the message in the interface so we can call * can_put_echo_skb(). We must do this before we enable * transmit as we might race against do_tx(). 
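The xmit path above relies on tx_active being a bitmask of buffers currently in flight; fls() then yields the index of the next free buffer, and the hardware drains lower-numbered objects first. A small stand-alone sketch of that selection with invented numbers (fls() is modelled with a GCC builtin, and the 64-object split is only an example, not something this hunk defines):

#include <stdio.h>

static unsigned int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* same result as the kernel's fls() */
}

int main(void)
{
	unsigned int tx_active = 0x7;		/* buffer indices 0..2 still pending */
	unsigned int msg_obj_tx_first = 33;	/* first TX object of an example 64-object split */
	unsigned int idx = fls32(tx_active);	/* -> 3, the next free buffer */

	printf("queue into buffer %u, message object %u\n",
	       idx, idx + msg_obj_tx_first);
	return 0;
}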
*/ @@ -467,7 +454,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, can_put_echo_skb(skb, dev, idx, 0); /* Update the active bits */ - atomic_add((1 << idx), &priv->tx_active); + atomic_add(BIT(idx), &priv->tx_active); /* Start transmission */ c_can_object_put(dev, IF_TX, obj, IF_COMM_TX); @@ -511,7 +498,7 @@ static int c_can_set_bittiming(struct net_device *dev) reg_brpe = brpe & BRP_EXT_BRPE_MASK; netdev_info(dev, - "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); + "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG); ctrl_save &= ~CONTROL_INIT; @@ -527,8 +514,7 @@ static int c_can_set_bittiming(struct net_device *dev) return c_can_wait_for_ctrl_init(dev, priv, 0); } -/* - * Configure C_CAN message objects for Tx and Rx purposes: +/* Configure C_CAN message objects for Tx and Rx purposes: * C_CAN provides a total of 32 message objects that can be configured * either for Tx or Rx purposes. Here the first 16 message objects are used as * a reception FIFO. The end of reception FIFO is signified by the EoB bit @@ -538,17 +524,18 @@ static int c_can_set_bittiming(struct net_device *dev) */ static void c_can_configure_msg_objects(struct net_device *dev) { + struct c_can_priv *priv = netdev_priv(dev); int i; /* first invalidate all message objects */ - for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++) + for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++) c_can_inval_msg_object(dev, IF_RX, i); /* setup receive message objects */ - for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) + for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++) c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV); - c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, + c_can_setup_receive_object(dev, IF_RX, priv->msg_obj_rx_last, 0, 0, IF_MCONT_RCV_EOB); } @@ -572,8 +559,7 @@ static int c_can_software_reset(struct net_device *dev) return 0; } -/* - * Configure C_CAN chip: +/* Configure C_CAN chip: * - enable/disable auto-retransmission * - set operating mode * - configure message objects @@ -714,12 +700,21 @@ static void c_can_do_tx(struct net_device *dev) struct net_device_stats *stats = &dev->stats; u32 idx, obj, pkts = 0, bytes = 0, pend, clr; - clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG); + if (priv->msg_obj_tx_last > 32) + pend = priv->read_reg32(priv, C_CAN_INTPND3_REG); + else + pend = priv->read_reg(priv, C_CAN_INTPND2_REG); + clr = pend; while ((idx = ffs(pend))) { idx--; - pend &= ~(1 << idx); - obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + pend &= ~BIT(idx); + obj = idx + priv->msg_obj_tx_first; + + /* We use IF_RX interface instead of IF_TX because we + * are called from c_can_poll(), which runs inside + * NAPI. We are not trasmitting. + */ c_can_inval_tx_object(dev, IF_RX, obj); can_get_echo_skb(dev, idx, NULL); bytes += priv->dlc[idx]; @@ -729,7 +724,7 @@ static void c_can_do_tx(struct net_device *dev) /* Clear the bits in the tx_active mask */ atomic_sub(clr, &priv->tx_active); - if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1))) + if (clr & BIT(priv->msg_obj_tx_num - 1)) netif_wake_queue(dev); if (pkts) { @@ -739,20 +734,18 @@ static void c_can_do_tx(struct net_device *dev) } } -/* - * If we have a gap in the pending bits, that means we either +/* If we have a gap in the pending bits, that means we either * raced with the hardware or failed to readout all upper * objects in the last run due to quota limit. 
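A stand-alone model of the gap handling described above may make the arithmetic easier to follow. This is not the driver code itself: hweight32()/fls() are replaced by GCC builtins, pend is assumed non-zero (as on the driver's call path), and the example value is made up.

#include <stdint.h>
#include <stdio.h>

static uint32_t adjust_pending(uint32_t pend, uint32_t rx_mask)
{
	uint32_t weight, lasts;

	if (pend == rx_mask)			/* every RX object pending: no gap */
		return pend;

	weight = __builtin_popcount(pend);	/* hweight32() */
	lasts = 32 - __builtin_clz(pend);	/* fls(), pend != 0 */

	if (lasts == weight)			/* bits 0..lasts-1 contiguous: no gap */
		return pend;

	/* walk down from the highest set bit until the gap is found */
	for (lasts--; pend & (1u << (lasts - 1)); lasts--)
		;

	/* handle only the group above the gap in this pass */
	return pend & ~((1u << lasts) - 1);
}

int main(void)
{
	/* bits 0..2 and 10..11 pending; the gap sits at bits 3..9 */
	printf("0x%04x\n", adjust_pending(0x0c07, 0xffff));	/* prints 0x0c00 */
	return 0;
}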
*/ -static u32 c_can_adjust_pending(u32 pend) +static u32 c_can_adjust_pending(u32 pend, u32 rx_mask) { u32 weight, lasts; - if (pend == RECEIVE_OBJECT_BITS) + if (pend == rx_mask) return pend; - /* - * If the last set bit is larger than the number of pending + /* If the last set bit is larger than the number of pending * bits we have a gap. */ weight = hweight32(pend); @@ -762,19 +755,19 @@ static u32 c_can_adjust_pending(u32 pend) if (lasts == weight) return pend; - /* - * Find the first set bit after the gap. We walk backwards + /* Find the first set bit after the gap. We walk backwards * from the last set bit. */ - for (lasts--; pend & (1 << (lasts - 1)); lasts--); + for (lasts--; pend & BIT(lasts - 1); lasts--) + ; - return pend & ~((1 << lasts) - 1); + return pend & ~GENMASK(lasts - 1, 0); } static inline void c_can_rx_object_get(struct net_device *dev, struct c_can_priv *priv, u32 obj) { - c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); + c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); } static inline void c_can_rx_finalize(struct net_device *dev, @@ -803,8 +796,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, continue; } - /* - * This really should not happen, but this covers some + /* This really should not happen, but this covers some * odd HW behaviour. Do not remove that unless you * want to brick your machine. */ @@ -825,19 +817,22 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, static inline u32 c_can_get_pending(struct c_can_priv *priv) { - u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); + u32 pend; + + if (priv->msg_obj_rx_last > 16) + pend = priv->read_reg32(priv, C_CAN_NEWDAT1_REG); + else + pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); return pend; } -/* - * theory of operation: +/* theory of operation: * * c_can core saves a received CAN message into the first free message * object it finds free (starting with the lowest). Bits NEWDAT and * INTPND are set for this message object indicating that a new message - * has arrived. To work-around this issue, we keep two groups of message - * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. + * has arrived. * * We clear the newdat bit right away. * @@ -848,23 +843,16 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) struct c_can_priv *priv = netdev_priv(dev); u32 pkts = 0, pend = 0, toread, n; - /* - * It is faster to read only one 16bit register. This is only possible - * for a maximum number of 16 objects. - */ - BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16, - "Implementation does not support more message objects than 16"); - while (quota > 0) { if (!pend) { pend = c_can_get_pending(priv); if (!pend) break; - /* - * If the pending field has a gap, handle the + /* If the pending field has a gap, handle the * bits above the gap first. */ - toread = c_can_adjust_pending(pend); + toread = c_can_adjust_pending(pend, + priv->msg_obj_rx_mask); } else { toread = pend; } @@ -883,7 +871,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) } static int c_can_handle_state_change(struct net_device *dev, - enum c_can_bus_error_types error_type) + enum c_can_bus_error_types error_type) { unsigned int reg_err_counter; unsigned int rx_err_passive; @@ -979,8 +967,7 @@ static int c_can_handle_bus_err(struct net_device *dev, struct can_frame *cf; struct sk_buff *skb; - /* - * early exit if no lec update or no error. + /* early exit if no lec update or no error. 
* no lec update means that no CAN bus event has been detected * since CPU wrote 0x7 value to status reg. */ @@ -999,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev, if (unlikely(!skb)) return 0; - /* - * check for 'last error code' which tells us the + /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; @@ -1049,7 +1035,8 @@ static int c_can_poll(struct napi_struct *napi, int quota) /* Only read the status register if a status interrupt was pending */ if (atomic_xchg(&priv->sie_pending, 0)) { - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); + priv->last_status = priv->read_reg(priv, C_CAN_STS_REG); + curr = priv->last_status; /* Ack status on C_CAN. D_CAN is self clearing */ if (priv->type != BOSCH_D_CAN) priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); @@ -1147,7 +1134,7 @@ static int c_can_open(struct net_device *dev) /* register interrupt handler */ err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name, - dev); + dev); if (err < 0) { netdev_err(dev, "failed to request interrupt\n"); goto exit_irq_fail; @@ -1195,17 +1182,31 @@ static int c_can_close(struct net_device *dev) return 0; } -struct net_device *alloc_c_can_dev(void) +struct net_device *alloc_c_can_dev(int msg_obj_num) { struct net_device *dev; struct c_can_priv *priv; + int msg_obj_tx_num = msg_obj_num / 2; - dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM); + dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num), + msg_obj_tx_num); if (!dev) return NULL; priv = netdev_priv(dev); - netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); + priv->msg_obj_num = msg_obj_num; + priv->msg_obj_rx_num = msg_obj_num - msg_obj_tx_num; + priv->msg_obj_rx_first = 1; + priv->msg_obj_rx_last = + priv->msg_obj_rx_first + priv->msg_obj_rx_num - 1; + priv->msg_obj_rx_mask = GENMASK(priv->msg_obj_rx_num - 1, 0); + + priv->msg_obj_tx_num = msg_obj_tx_num; + priv->msg_obj_tx_first = priv->msg_obj_rx_last + 1; + priv->msg_obj_tx_last = + priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1; + + netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num); priv->dev = dev; priv->can.bittiming_const = &c_can_bittiming_const; @@ -1239,7 +1240,7 @@ int c_can_power_down(struct net_device *dev) /* Wait for the PDA bit to get set */ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && - time_after(time_out, jiffies)) + time_after(time_out, jiffies)) cpu_relax(); if (time_after(jiffies, time_out)) @@ -1280,7 +1281,7 @@ int c_can_power_up(struct net_device *dev) /* Wait for the PDA bit to get clear */ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && - time_after(time_out, jiffies)) + time_after(time_out, jiffies)) cpu_relax(); if (time_after(jiffies, time_out)) { diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 92213d3d96eb..06045f610f0e 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -22,23 +22,6 @@ #ifndef C_CAN_H #define C_CAN_H -/* message object split */ -#define C_CAN_NO_OF_OBJECTS 32 -#define C_CAN_MSG_OBJ_RX_NUM 16 -#define C_CAN_MSG_OBJ_TX_NUM 16 - -#define C_CAN_MSG_OBJ_RX_FIRST 1 -#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \ - C_CAN_MSG_OBJ_RX_NUM - 1) - -#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1) -#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + 
\ - C_CAN_MSG_OBJ_TX_NUM - 1) - -#define C_CAN_MSG_OBJ_RX_SPLIT 9 -#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) -#define RECEIVE_OBJECT_BITS 0x0000ffff - enum reg { C_CAN_CTRL_REG = 0, C_CAN_CTRL_EX_REG, @@ -76,6 +59,7 @@ enum reg { C_CAN_NEWDAT2_REG, C_CAN_INTPND1_REG, C_CAN_INTPND2_REG, + C_CAN_INTPND3_REG, C_CAN_MSGVAL1_REG, C_CAN_MSGVAL2_REG, C_CAN_FUNCTION_REG, @@ -137,6 +121,7 @@ static const u16 __maybe_unused reg_map_d_can[] = { [C_CAN_NEWDAT2_REG] = 0x9E, [C_CAN_INTPND1_REG] = 0xB0, [C_CAN_INTPND2_REG] = 0xB2, + [C_CAN_INTPND3_REG] = 0xB4, [C_CAN_MSGVAL1_REG] = 0xC4, [C_CAN_MSGVAL2_REG] = 0xC6, [C_CAN_IF1_COMREQ_REG] = 0x100, @@ -164,7 +149,6 @@ static const u16 __maybe_unused reg_map_d_can[] = { }; enum c_can_dev_id { - BOSCH_C_CAN_PLATFORM, BOSCH_C_CAN, BOSCH_D_CAN, }; @@ -176,6 +160,7 @@ struct raminit_bits { struct c_can_driver_data { enum c_can_dev_id id; + unsigned int msg_obj_num; /* RAMINIT register description. Optional. */ const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */ @@ -197,26 +182,34 @@ struct c_can_priv { struct napi_struct napi; struct net_device *dev; struct device *device; + unsigned int msg_obj_num; + unsigned int msg_obj_rx_num; + unsigned int msg_obj_tx_num; + unsigned int msg_obj_rx_first; + unsigned int msg_obj_rx_last; + unsigned int msg_obj_tx_first; + unsigned int msg_obj_tx_last; + u32 msg_obj_rx_mask; atomic_t tx_active; atomic_t sie_pending; unsigned long tx_dir; int last_status; - u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); - void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val); - u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index); - void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val); + u16 (*read_reg)(const struct c_can_priv *priv, enum reg index); + void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val); + u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index); + void (*write_reg32)(const struct c_can_priv *priv, enum reg index, u32 val); void __iomem *base; const u16 *regs; void *priv; /* for board-specific data */ enum c_can_dev_id type; struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ - void (*raminit) (const struct c_can_priv *priv, bool enable); + void (*raminit)(const struct c_can_priv *priv, bool enable); u32 comm_rcv_high; u32 rxmasked; - u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; + u32 dlc[]; }; -struct net_device *alloc_c_can_dev(void); +struct net_device *alloc_c_can_dev(int msg_obj_num); void free_c_can_dev(struct net_device *dev); int register_c_can_dev(struct net_device *dev); void unregister_c_can_dev(struct net_device *dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 7efb60b50876..bf2f8c3da1c1 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -31,6 +31,8 @@ enum c_can_pci_reg_align { struct c_can_pci_data { /* Specify if is C_CAN or D_CAN */ enum c_can_dev_id type; + /* Number of message objects */ + unsigned int msg_obj_num; /* Set the register alignment in the memory */ enum c_can_pci_reg_align reg_align; /* Set the frequency */ @@ -41,32 +43,31 @@ struct c_can_pci_data { void (*init)(const struct c_can_priv *priv, bool enable); }; -/* - * 16-bit c_can registers can be arranged differently in the memory +/* 16-bit c_can registers can be arranged differently in the memory * architecture of different implementations. 
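The msg_obj_* bookkeeping introduced in alloc_c_can_dev() above splits the controller's message objects evenly between RX and TX. A worked example for a 64-object D_CAN (the object count matches the dra7/am3352 driver data later in this patch; the little program below is only an illustration of the arithmetic):

#include <stdio.h>

int main(void)
{
	unsigned int msg_obj_num = 64;			/* e.g. a 64-object D_CAN */
	unsigned int tx_num = msg_obj_num / 2;		/* 32 */
	unsigned int rx_num = msg_obj_num - tx_num;	/* 32 */
	unsigned int rx_first = 1;
	unsigned int rx_last = rx_first + rx_num - 1;	/* 32 */
	unsigned int tx_first = rx_last + 1;		/* 33 */
	unsigned int tx_last = tx_first + tx_num - 1;	/* 64 */
	unsigned long long rx_mask = (1ULL << rx_num) - 1;	/* GENMASK(31, 0) */

	printf("rx: %u..%u (mask 0x%llx), tx: %u..%u\n",
	       rx_first, rx_last, rx_mask, tx_first, tx_last);
	return 0;
}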
For example: 16-bit * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. * Handle the same by providing a common read/write interface. */ static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv, - enum reg index) + enum reg index) { return readw(priv->base + priv->regs[index]); } static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv, - enum reg index, u16 val) + enum reg index, u16 val) { writew(val, priv->base + priv->regs[index]); } static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv, - enum reg index) + enum reg index) { return readw(priv->base + 2 * priv->regs[index]); } static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv, - enum reg index, u16 val) + enum reg index, u16 val) { writew(val, priv->base + 2 * priv->regs[index]); } @@ -88,13 +89,13 @@ static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index) u32 val; val = priv->read_reg(priv, index); - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; + val |= ((u32)priv->read_reg(priv, index + 1)) << 16; return val; } static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index, - u32 val) + u32 val) { priv->write_reg(priv, index + 1, val >> 16); priv->write_reg(priv, index, val); @@ -142,14 +143,13 @@ static int c_can_pci_probe(struct pci_dev *pdev, pci_resource_len(pdev, c_can_pci_data->bar)); if (!addr) { dev_err(&pdev->dev, - "device has no PCI memory resources, " - "failing adapter\n"); + "device has no PCI memory resources, failing adapter\n"); ret = -ENOMEM; goto out_release_regions; } /* allocate the c_can device */ - dev = alloc_c_can_dev(); + dev = alloc_c_can_dev(c_can_pci_data->msg_obj_num); if (!dev) { ret = -ENOMEM; goto out_iounmap; @@ -217,7 +217,7 @@ static int c_can_pci_probe(struct pci_dev *pdev, } dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", - KBUILD_MODNAME, priv->regs, dev->irq); + KBUILD_MODNAME, priv->regs, dev->irq); return 0; @@ -252,8 +252,9 @@ static void c_can_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static const struct c_can_pci_data c_can_sta2x11= { +static const struct c_can_pci_data c_can_sta2x11 = { .type = BOSCH_C_CAN, + .msg_obj_num = 32, .reg_align = C_CAN_REG_ALIGN_32, .freq = 52000000, /* 52 Mhz */ .bar = 0, @@ -261,6 +262,7 @@ static const struct c_can_pci_data c_can_sta2x11= { static const struct c_can_pci_data c_can_pch = { .type = BOSCH_C_CAN, + .msg_obj_num = 32, .reg_align = C_CAN_REG_32, .freq = 50000000, /* 50 MHz */ .init = c_can_pci_reset_pch, @@ -269,7 +271,7 @@ static const struct c_can_pci_data c_can_pch = { #define C_CAN_ID(_vend, _dev, _driverdata) { \ PCI_DEVICE(_vend, _dev), \ - .driver_data = (unsigned long)&_driverdata, \ + .driver_data = (unsigned long)&(_driverdata), \ } static const struct pci_device_id c_can_pci_tbl[] = { @@ -279,6 +281,7 @@ static const struct pci_device_id c_can_pci_tbl[] = { c_can_pch), {}, }; + static struct pci_driver c_can_pci_driver = { .name = KBUILD_MODNAME, .id_table = c_can_pci_tbl, diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 47b251b1607c..36950363682f 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -193,10 +193,12 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) static const struct c_can_driver_data c_can_drvdata = { .id = BOSCH_C_CAN, + .msg_obj_num = 32, }; static const struct c_can_driver_data d_can_drvdata = { .id = 
BOSCH_D_CAN, + .msg_obj_num = 32, }; static const struct raminit_bits dra7_raminit_bits[] = { @@ -206,6 +208,7 @@ static const struct raminit_bits dra7_raminit_bits[] = { static const struct c_can_driver_data dra7_dcan_drvdata = { .id = BOSCH_D_CAN, + .msg_obj_num = 64, .raminit_num = ARRAY_SIZE(dra7_raminit_bits), .raminit_bits = dra7_raminit_bits, .raminit_pulse = true, @@ -218,6 +221,7 @@ static const struct raminit_bits am3352_raminit_bits[] = { static const struct c_can_driver_data am3352_dcan_drvdata = { .id = BOSCH_D_CAN, + .msg_obj_num = 64, .raminit_num = ARRAY_SIZE(am3352_raminit_bits), .raminit_bits = am3352_raminit_bits, }; @@ -294,7 +298,7 @@ static int c_can_plat_probe(struct platform_device *pdev) } /* allocate the c_can device */ - dev = alloc_c_can_dev(); + dev = alloc_c_can_dev(drvdata->msg_obj_num); if (!dev) { ret = -ENOMEM; goto exit; diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index f7fe226bb395..f49170eadd54 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -81,9 +81,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, if (bt->sample_point) { sample_point_nominal = bt->sample_point; } else { - if (bt->bitrate > 800000) + if (bt->bitrate > 800 * CAN_KBPS) sample_point_nominal = 750; - else if (bt->bitrate > 500000) + else if (bt->bitrate > 500 * CAN_KBPS) sample_point_nominal = 800; else sample_point_nominal = 875; @@ -174,6 +174,30 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, return 0; } + +void can_calc_tdco(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + const struct can_bittiming *dbt = &priv->data_bittiming; + struct can_tdc *tdc = &priv->tdc; + const struct can_tdc_const *tdc_const = priv->tdc_const; + + if (!tdc_const) + return; + + /* As specified in ISO 11898-1 section 11.3.3 "Transmitter + * delay compensation" (TDC) is only applicable if data BRP is + * one or two. 
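As a worked example of the TDCO value derived here: with a data-phase bit time of 10 time quanta, a sample point of 75.0% and a controller limit of 63, the formula picks 7. The sketch below only mirrors that arithmetic; all numbers are invented and it is not the kernel helper itself.

#include <stdio.h>

int main(void)
{
	unsigned int bit_time_tq = 10;		/* time quanta per data bit */
	unsigned int sample_point = 750;	/* 75.0%, in tenths of a percent */
	unsigned int tdco_max = 63;		/* from the controller's tdc_const */
	unsigned int dbrp = 2;			/* data-phase bit-rate prescaler */
	unsigned int tdco = 0;

	if (dbrp == 1 || dbrp == 2) {
		unsigned int sample_point_in_tq = bit_time_tq * sample_point / 1000;

		tdco = sample_point_in_tq < tdco_max ? sample_point_in_tq : tdco_max;
	}

	printf("tdco=%u\n", tdco);	/* prints tdco=7 */
	return 0;
}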
+ */ + if (dbt->brp == 1 || dbt->brp == 2) { + /* Reuse "normal" sample point and convert it to time quanta */ + u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000; + + tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max); + } else { + tdc->tdco = 0; + } +} #endif /* CONFIG_CAN_CALC_BITTIMING */ /* Checks the validity of the specified bit-timing parameters prop_seg, diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index f5d79e6e5483..e38c2566aff4 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -8,20 +8,17 @@ #include <net/rtnetlink.h> static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { - [IFLA_CAN_STATE] = { .type = NLA_U32 }, - [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, - [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, - [IFLA_CAN_RESTART] = { .type = NLA_U32 }, - [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, - [IFLA_CAN_BITTIMING_CONST] - = { .len = sizeof(struct can_bittiming_const) }, - [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, - [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, - [IFLA_CAN_DATA_BITTIMING] - = { .len = sizeof(struct can_bittiming) }, - [IFLA_CAN_DATA_BITTIMING_CONST] - = { .len = sizeof(struct can_bittiming_const) }, - [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, + [IFLA_CAN_STATE] = { .type = NLA_U32 }, + [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, + [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, + [IFLA_CAN_RESTART] = { .type = NLA_U32 }, + [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, + [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, + [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, }; static int can_validate(struct nlattr *tb[], struct nlattr *data[], @@ -189,6 +186,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); + can_calc_tdco(dev); + if (priv->do_set_data_bittiming) { /* Finally, set the bit-timing registers */ err = priv->do_set_data_bittiming(dev); diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c index 6a64fe410987..61660248c69e 100644 --- a/drivers/net/can/dev/skb.c +++ b/drivers/net/can/dev/skb.c @@ -45,7 +45,7 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, BUG_ON(idx >= priv->echo_skb_max); /* check flag whether this packet has to be looped back */ - if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK || + if (!(dev->flags & IFF_ECHO) || (skb->protocol != htons(ETH_P_CAN) && skb->protocol != htons(ETH_P_CANFD))) { kfree_skb(skb); @@ -58,7 +58,6 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, return -ENOMEM; /* make settings for echo to reduce code in irq context */ - skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; skb->dev = dev; @@ -111,6 +110,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr, priv->echo_skb[idx] = NULL; + if (skb->pkt_type == PACKET_LOOPBACK) { + skb->pkt_type = PACKET_BROADCAST; + } else { + dev_consume_skb_any(skb); + return NULL; + } + return skb; } @@ -147,14 +153,25 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb); * * The function is 
typically called when TX failed. */ -void can_free_echo_skb(struct net_device *dev, unsigned int idx) +void can_free_echo_skb(struct net_device *dev, unsigned int idx, + unsigned int *frame_len_ptr) { struct can_priv *priv = netdev_priv(dev); - BUG_ON(idx >= priv->echo_skb_max); + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return; + } if (priv->echo_skb[idx]) { - dev_kfree_skb_any(priv->echo_skb[idx]); + struct sk_buff *skb = priv->echo_skb[idx]; + struct can_skb_priv *can_skb_priv = can_skb_prv(skb); + + if (frame_len_ptr) + *frame_len_ptr = can_skb_priv->frame_len; + + dev_kfree_skb_any(skb); priv->echo_skb[idx] = NULL; } } @@ -166,8 +183,11 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + sizeof(struct can_frame)); - if (unlikely(!skb)) + if (unlikely(!skb)) { + *cf = NULL; + return NULL; + } skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; @@ -194,8 +214,11 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + sizeof(struct canfd_frame)); - if (unlikely(!skb)) + if (unlikely(!skb)) { + *cfd = NULL; + return NULL; + } skb->protocol = htons(ETH_P_CANFD); skb->pkt_type = PACKET_BROADCAST; diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 4a8453290530..78e27940b2af 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -520,7 +520,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo) can_get_echo_skb(dev, i, NULL); } else { /* For cleanup of untransmitted messages */ - can_free_echo_skb(dev, i); + can_free_echo_skb(dev, i, NULL); } priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 0c8d36bc668c..34073cd077e4 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -8,6 +8,7 @@ * https://github.com/linux-can/can-doc/tree/master/m_can */ +#include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> @@ -148,6 +149,16 @@ enum m_can_reg { #define NBTP_NTSEG2_SHIFT 0 #define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT) +/* Timestamp Counter Configuration Register (TSCC) */ +#define TSCC_TCP_MASK GENMASK(19, 16) +#define TSCC_TSS_MASK GENMASK(1, 0) +#define TSCC_TSS_DISABLE 0x0 +#define TSCC_TSS_INTERNAL 0x1 +#define TSCC_TSS_EXTERNAL 0x2 + +/* Timestamp Counter Value Register (TSCV) */ +#define TSCV_TSC_MASK GENMASK(15, 0) + /* Error Counter Register(ECR) */ #define ECR_RP BIT(15) #define ECR_REC_SHIFT 8 @@ -302,6 +313,7 @@ enum m_can_reg { #define RX_BUF_ANMF BIT(31) #define RX_BUF_FDF BIT(21) #define RX_BUF_BRS BIT(20) +#define RX_BUF_RXTS_MASK GENMASK(15, 0) /* Tx Buffer Element */ /* T0 */ @@ -319,6 +331,7 @@ enum m_can_reg { /* E1 */ #define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT #define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) +#define TX_EVENT_TXTS_MASK GENMASK(15, 0) static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) { @@ -413,6 +426,20 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) m_can_write(cdev, M_CAN_ILE, 0x0); } +/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit + * width. 
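m_can_get_timestamp() below, and the TSCC/RX/TX-event handling elsewhere in this patch, lean on the bitfield.h FIELD_GET()/FIELD_PREP() helpers. A rough user-space model of what those macros do for contiguous masks (the kernel versions add compile-time checking; the register value and the GENMASK32 name here are invented for the example):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t tscv = 0xabcd1234;				/* invented register value */
	uint32_t tsc = field_get(GENMASK32(15, 0), tscv);	/* 0x1234 */

	/* widen the 16-bit counter to 32 bit, as the helper below does */
	printf("tsc=0x%04x timestamp=0x%08x\n", tsc, tsc << 16);
	/* TSCC.TCP field as written by the patch: FIELD_PREP(TSCC_TCP_MASK, 0xf) */
	printf("tscc=0x%08x\n", field_prep(GENMASK32(19, 16), 0xf));
	return 0;
}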
+ */ +static u32 m_can_get_timestamp(struct m_can_classdev *cdev) +{ + u32 tscv; + u32 tsc; + + tscv = m_can_read(cdev, M_CAN_TSCV); + tsc = FIELD_GET(TSCV_TSC_MASK, tscv); + + return (tsc << 16); +} + static void m_can_clean(struct net_device *net) { struct m_can_classdev *cdev = netdev_priv(net); @@ -425,11 +452,33 @@ static void m_can_clean(struct net_device *net) putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT); - can_free_echo_skb(cdev->net, putidx); + can_free_echo_skb(cdev->net, putidx, NULL); cdev->tx_skb = NULL; } } +/* For peripherals, pass skb to rx-offload, which will push skb from + * napi. For non-peripherals, RX is done in napi already, so push + * directly. timestamp is used to ensure good skb ordering in + * rx-offload and is ignored for non-peripherals. +*/ +static void m_can_receive_skb(struct m_can_classdev *cdev, + struct sk_buff *skb, + u32 timestamp) +{ + if (cdev->is_peripheral) { + struct net_device_stats *stats = &cdev->net->stats; + int err; + + err = can_rx_offload_queue_sorted(&cdev->offload, skb, + timestamp); + if (err) + stats->rx_fifo_errors++; + } else { + netif_receive_skb(skb); + } +} + static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { struct net_device_stats *stats = &dev->stats; @@ -437,6 +486,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) struct canfd_frame *cf; struct sk_buff *skb; u32 id, fgi, dlc; + u32 timestamp = 0; int i; /* calculate the fifo get index for where to read data */ @@ -485,7 +535,9 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) stats->rx_packets++; stats->rx_bytes += cf->len; - netif_receive_skb(skb); + timestamp = FIELD_GET(RX_BUF_RXTS_MASK, dlc); + + m_can_receive_skb(cdev, skb, timestamp); } static int m_can_do_rx_poll(struct net_device *dev, int quota) @@ -516,9 +568,11 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) static int m_can_handle_lost_msg(struct net_device *dev) { + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; struct can_frame *frame; + u32 timestamp = 0; netdev_err(dev, "msg lost in rxf0\n"); @@ -532,7 +586,10 @@ static int m_can_handle_lost_msg(struct net_device *dev) frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } @@ -544,6 +601,7 @@ static int m_can_handle_lec_err(struct net_device *dev, struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 timestamp = 0; cdev->can.can_stats.bus_error++; stats->rx_errors++; @@ -589,7 +647,11 @@ static int m_can_handle_lec_err(struct net_device *dev, stats->rx_packets++; stats->rx_bytes += cf->len; - netif_receive_skb(skb); + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } @@ -647,6 +709,7 @@ static int m_can_handle_state_change(struct net_device *dev, struct sk_buff *skb; struct can_berr_counter bec; unsigned int ecr; + u32 timestamp = 0; switch (new_state) { case CAN_STATE_ERROR_WARNING: @@ -708,7 +771,11 @@ static int m_can_handle_state_change(struct net_device *dev, stats->rx_packets++; stats->rx_bytes += cf->len; - netif_receive_skb(skb); + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } @@ -773,6 +840,7 @@ static int 
m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus) struct m_can_classdev *cdev = netdev_priv(dev); struct can_frame *cf; struct sk_buff *skb; + u32 timestamp = 0; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); @@ -794,7 +862,11 @@ static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus) netdev_dbg(dev, "allocation of skb failed\n"); return 0; } - netif_receive_skb(skb); + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } @@ -895,6 +967,29 @@ static int m_can_poll(struct napi_struct *napi, int quota) return work_done; } +/* Echo tx skb and update net stats. Peripherals use rx-offload for + * echo. timestamp is used for peripherals to ensure correct ordering + * by rx-offload, and is ignored for non-peripherals. +*/ +static void m_can_tx_update_stats(struct m_can_classdev *cdev, + unsigned int msg_mark, + u32 timestamp) +{ + struct net_device *dev = cdev->net; + struct net_device_stats *stats = &dev->stats; + + if (cdev->is_peripheral) + stats->tx_bytes += + can_rx_offload_get_echo_skb(&cdev->offload, + msg_mark, + timestamp, + NULL); + else + stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); + + stats->tx_packets++; +} + static void m_can_echo_tx_event(struct net_device *dev) { u32 txe_count = 0; @@ -904,7 +999,6 @@ static void m_can_echo_tx_event(struct net_device *dev) unsigned int msg_mark; struct m_can_classdev *cdev = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; /* read tx event fifo status */ m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); @@ -914,21 +1008,23 @@ static void m_can_echo_tx_event(struct net_device *dev) /* Get and process all sent elements */ for (i = 0; i < txe_count; i++) { + u32 txe, timestamp = 0; + /* retrieve get index */ fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >> TXEFS_EFGI_SHIFT; - /* get message marker */ - msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) & - TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; + /* get message marker, timestamp */ + txe = m_can_txe_fifo_read(cdev, fgi, 4); + msg_mark = (txe & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; + timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe); /* ack txe element */ m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK & (fgi << TXEFA_EFAI_SHIFT))); /* update stats */ - stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); - stats->tx_packets++; + m_can_tx_update_stats(cdev, msg_mark, timestamp); } } @@ -936,7 +1032,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct m_can_classdev *cdev = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; u32 ir; if (pm_runtime_suspended(cdev->dev)) @@ -969,8 +1064,12 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) if (cdev->version == 30) { if (ir & IR_TC) { /* Transmission Complete Interrupt*/ - stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); - stats->tx_packets++; + u32 timestamp = 0; + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + m_can_tx_update_stats(cdev, 0, timestamp); + can_led_event(dev, CAN_LED_EVENT_TX); netif_wake_queue(dev); } @@ -1108,6 +1207,7 @@ static int m_can_set_bittiming(struct net_device *dev) * - >= v3.1.x: TX FIFO is used * - configure mode * - setup bittiming + * - configure timestamp generation */ static void m_can_chip_config(struct net_device *dev) { @@ -1219,6 +1319,10 @@ static void m_can_chip_config(struct net_device *dev) /* set bittiming params */ 
m_can_set_bittiming(dev); + /* enable internal timestamp generation, with a prescaler of 16. The + * prescaler is applied to the nominal bit timing */ + m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf)); + m_can_config_endisable(cdev, false); if (cdev->ops->init) @@ -1426,6 +1530,9 @@ static int m_can_close(struct net_device *dev) cdev->tx_wq = NULL; } + if (cdev->is_peripheral) + can_rx_offload_disable(&cdev->offload); + close_candev(dev); can_led_event(dev, CAN_LED_EVENT_STOP); @@ -1624,6 +1731,9 @@ static int m_can_open(struct net_device *dev) goto exit_disable_clks; } + if (cdev->is_peripheral) + can_rx_offload_enable(&cdev->offload); + /* register interrupt handler */ if (cdev->is_peripheral) { cdev->tx_skb = NULL; @@ -1665,6 +1775,8 @@ exit_irq_fail: if (cdev->is_peripheral) destroy_workqueue(cdev->tx_wq); out_wq_fail: + if (cdev->is_peripheral) + can_rx_offload_disable(&cdev->offload); close_candev(dev); exit_disable_clks: m_can_clk_stop(cdev); @@ -1787,11 +1899,6 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, } class_dev = netdev_priv(net_dev); - if (!class_dev) { - dev_err(dev, "Failed to init netdev cdevate"); - goto out; - } - class_dev->net = net_dev; class_dev->dev = dev; SET_NETDEV_DEV(net_dev, dev); @@ -1818,15 +1925,22 @@ int m_can_class_register(struct m_can_classdev *cdev) return ret; } + if (cdev->is_peripheral) { + ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, + M_CAN_NAPI_WEIGHT); + if (ret) + goto clk_disable; + } + ret = m_can_dev_setup(cdev); if (ret) - goto clk_disable; + goto rx_offload_del; ret = register_m_can_dev(cdev->net); if (ret) { dev_err(cdev->dev, "registering %s failed (err=%d)\n", cdev->net->name, ret); - goto clk_disable; + goto rx_offload_del; } devm_can_led_init(cdev->net); @@ -1839,6 +1953,13 @@ int m_can_class_register(struct m_can_classdev *cdev) /* Probe finished * Stop clocks.
They will be reactivated once the M_CAN device is opened */ + m_can_clk_stop(cdev); + + return 0; + +rx_offload_del: + if (cdev->is_peripheral) + can_rx_offload_del(&cdev->offload); clk_disable: m_can_clk_stop(cdev); @@ -1848,6 +1969,8 @@ EXPORT_SYMBOL_GPL(m_can_class_register); void m_can_class_unregister(struct m_can_classdev *cdev) { + if (cdev->is_peripheral) + can_rx_offload_del(&cdev->offload); unregister_candev(cdev->net); } EXPORT_SYMBOL_GPL(m_can_class_unregister); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 3fda84cef351..ace071c3e58c 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -8,6 +8,7 @@ #include <linux/can/core.h> #include <linux/can/led.h> +#include <linux/can/rx-offload.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/dma-mapping.h> @@ -71,6 +72,7 @@ struct m_can_ops { struct m_can_classdev { struct can_priv can; + struct can_rx_offload offload; struct napi_struct napi; struct net_device *net; struct device *dev; diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h index c66da829b795..e62c030d3e1e 100644 --- a/drivers/net/can/m_can/tcan4x5x.h +++ b/drivers/net/can/m_can/tcan4x5x.h @@ -11,7 +11,6 @@ #include <linux/gpio/consumer.h> #include <linux/regmap.h> -#include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 4870c4ea190a..00e4533c8bdd 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -217,7 +217,7 @@ static void tx_failure_cleanup(struct net_device *ndev) int i; for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++) - can_free_echo_skb(ndev, i); + can_free_echo_skb(ndev, i, NULL); } static void rcar_can_error(struct net_device *ndev) diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index d8d233e62990..311e6ca3bdc4 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -617,7 +617,7 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) u32 i; for (i = 0; i < RCANFD_FIFO_DEPTH; i++) - can_free_echo_skb(ndev, i); + can_free_echo_skb(ndev, i, NULL); } static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 9e86488ba55f..3fad54646746 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -525,7 +525,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && !(status & SR_TCS)) { stats->tx_errors++; - can_free_echo_skb(dev, 0); + can_free_echo_skb(dev, 0, NULL); } else { /* transmission complete */ stats->tx_bytes += diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index c3e020c90111..6f5d6d04a8b9 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -179,7 +179,7 @@ static void hi3110_clean(struct net_device *net) net->stats.tx_errors++; dev_kfree_skb(priv->tx_skb); if (priv->tx_len) - can_free_echo_skb(priv->net, 0); + can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; priv->tx_len = 0; } diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index a57da43680d8..492f1bcb0516 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -276,7 +276,7 @@ static void mcp251x_clean(struct net_device *net) net->stats.tx_errors++; 
dev_kfree_skb(priv->tx_skb); if (priv->tx_len) - can_free_echo_skb(priv->net, 0); + can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; priv->tx_len = 0; } diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig index f5a147a92cb2..dd0fc0a54be1 100644 --- a/drivers/net/can/spi/mcp251xfd/Kconfig +++ b/drivers/net/can/spi/mcp251xfd/Kconfig @@ -3,6 +3,7 @@ config CAN_MCP251XFD tristate "Microchip MCP251xFD SPI CAN controllers" select REGMAP + select WANT_DEV_COREDUMP help Driver for the Microchip MCP251XFD SPI FD-CAN controller family. diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile index cb71244cbe89..3cba3b9447ea 100644 --- a/drivers/net/can/spi/mcp251xfd/Makefile +++ b/drivers/net/can/spi/mcp251xfd/Makefile @@ -6,3 +6,6 @@ mcp251xfd-objs := mcp251xfd-objs += mcp251xfd-core.o mcp251xfd-objs += mcp251xfd-crc16.o mcp251xfd-objs += mcp251xfd-regmap.o +mcp251xfd-objs += mcp251xfd-timestamp.o + +mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 799e9d5d3481..970dc570e7a5 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -2,8 +2,8 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020 Pengutronix, -// Marc Kleine-Budde <kernel@pengutronix.de> +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> // // Based on: // @@ -16,7 +16,6 @@ #include <linux/clk.h> #include <linux/device.h> #include <linux/module.h> -#include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pm_runtime.h> @@ -330,11 +329,14 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) struct mcp251xfd_tx_ring *tx_ring; struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; struct mcp251xfd_tx_obj *tx_obj; + struct spi_transfer *xfer; u32 val; u16 addr; u8 len; int i, j; + netdev_reset_queue(priv->ndev); + /* TEF */ tef_ring = priv->tef; tef_ring->head = 0; @@ -347,8 +349,6 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) addr, val, val); for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { - struct spi_transfer *xfer; - xfer = &tef_ring->uinc_xfer[j]; xfer->tx_buf = &tef_ring->uinc_buf; xfer->len = len; @@ -357,6 +357,15 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; } + /* "cs_change == 1" on the last transfer results in an active + * chip select after the complete SPI message. This causes the + * controller to interpret the next register access as + * data. Set "cs_change" of the last transfer to "0" to + * properly deactivate the chip select at the end of the + * message. + */ + xfer->cs_change = 0; + /* TX */ tx_ring = priv->tx; tx_ring->head = 0; @@ -397,8 +406,6 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) addr, val, val); for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { - struct spi_transfer *xfer; - xfer = &rx_ring->uinc_xfer[j]; xfer->tx_buf = &rx_ring->uinc_buf; xfer->len = len; @@ -406,6 +413,15 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) xfer->cs_change_delay.value = 0; xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; } + + /* "cs_change == 1" on the last transfer results in an + * active chip select after the complete SPI + * message. 
This causes the controller to interpret + * the next register access as data. Set "cs_change" + * of the last transfer to "0" to properly deactivate + * the chip select at the end of the message. + */ + xfer->cs_change = 0; } } @@ -1097,6 +1113,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) return 0; out_chip_stop: + mcp251xfd_dump(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); return err; @@ -1247,10 +1264,12 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, - const struct mcp251xfd_hw_tef_obj *hw_tef_obj) + const struct mcp251xfd_hw_tef_obj *hw_tef_obj, + unsigned int *frame_len_ptr) { struct net_device_stats *stats = &priv->ndev->stats; - u32 seq, seq_masked, tef_tail_masked; + struct sk_buff *skb; + u32 seq, seq_masked, tef_tail_masked, tef_tail; seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, hw_tef_obj->flags); @@ -1266,10 +1285,14 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, if (seq_masked != tef_tail_masked) return mcp251xfd_handle_tefif_recover(priv, seq); + tef_tail = mcp251xfd_get_tef_tail(priv); + skb = priv->can.echo_skb[tef_tail]; + if (skb) + mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, - mcp251xfd_get_tef_tail(priv), - hw_tef_obj->ts, NULL); + tef_tail, hw_tef_obj->ts, + frame_len_ptr); stats->tx_packets++; priv->tef->tail++; @@ -1327,6 +1350,7 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) { struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX]; + unsigned int total_frame_len = 0; u8 tef_tail, len, l; int err, i; @@ -1348,7 +1372,9 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) } for (i = 0; i < len; i++) { - err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]); + unsigned int frame_len = 0; + + err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); /* -EAGAIN means the Sequence Number in the TEF * doesn't match our tef_tail. This can happen if we * read the TEF objects too early. Leave loop let the @@ -1358,6 +1384,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) goto out_netif_wake_queue; if (err) return err; + + total_frame_len += frame_len; } out_netif_wake_queue: @@ -1365,29 +1393,25 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) if (len) { struct mcp251xfd_tef_ring *ring = priv->tef; struct mcp251xfd_tx_ring *tx_ring = priv->tx; - struct spi_transfer *last_xfer; + int offset; /* Increment the TEF FIFO tail pointer 'len' times in * a single SPI message. * * Note: - * - * "cs_change == 1" on the last transfer results in an - * active chip select after the complete SPI - * message. This causes the controller to interpret - * the next register access as data. Temporary set - * "cs_change" of the last transfer to "0" to properly - * deactivate the chip select at the end of the - * message. + * Calculate offset, so that the SPI transfer ends on + * the last message of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. 
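The offset calculation referred to above works because every element of the uinc_xfer[] array is identical except that the ring setup now clears cs_change only on the very last one. A toy model of the indexing, with an invented array size (it demonstrates only the arithmetic, not the SPI transfer itself):

#include <stdio.h>

#define UINC_XFER_NUM 8			/* invented; the driver sizes this per ring */

struct xfer_model { int cs_change; };

int main(void)
{
	struct xfer_model uinc_xfer[UINC_XFER_NUM];
	unsigned int i, len = 3;	/* three FIFO increments to send this time */
	unsigned int offset;

	for (i = 0; i < UINC_XFER_NUM; i++)
		uinc_xfer[i].cs_change = 1;
	uinc_xfer[UINC_XFER_NUM - 1].cs_change = 0;	/* deassert CS after the message */

	/* start so that the message always ends on the last array element */
	offset = UINC_XFER_NUM - len;
	printf("transfer elements %u..%u, final cs_change=%d\n",
	       offset, UINC_XFER_NUM - 1, uinc_xfer[UINC_XFER_NUM - 1].cs_change);
	return 0;
}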
*/ - last_xfer = &ring->uinc_xfer[len - 1]; - last_xfer->cs_change = 0; - err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); - last_xfer->cs_change = 1; + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); if (err) return err; tx_ring->tail += len; + netdev_completed_queue(priv->ndev, len, total_frame_len); err = mcp251xfd_check_tef_tail(priv); if (err) @@ -1432,7 +1456,7 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, } static void -mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, +mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, struct sk_buff *skb) { @@ -1475,6 +1499,8 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) memcpy(cfd->data, hw_rx_obj->data, cfd->len); + + mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); } static int @@ -1535,7 +1561,7 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, return err; while ((len = mcp251xfd_get_rx_linear_len(ring))) { - struct spi_transfer *last_xfer; + int offset; rx_tail = mcp251xfd_get_rx_tail(ring); @@ -1556,19 +1582,14 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, * single SPI message. * * Note: - * - * "cs_change == 1" on the last transfer results in an - * active chip select after the complete SPI - * message. This causes the controller to interpret - * the next register access as data. Temporary set - * "cs_change" of the last transfer to "0" to properly - * deactivate the chip select at the end of the - * message. + * Calculate offset, so that the SPI transfer ends on + * the last message of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. 
*/ - last_xfer = &ring->uinc_xfer[len - 1]; - last_xfer->cs_change = 0; - err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); - last_xfer->cs_change = 1; + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); if (err) return err; @@ -1592,23 +1613,22 @@ static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv) return 0; } -static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, - u32 *timestamp) -{ - return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); -} - static struct sk_buff * -mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv, +mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, struct can_frame **cf, u32 *timestamp) { + struct sk_buff *skb; int err; err = mcp251xfd_get_timestamp(priv, timestamp); if (err) return NULL; - return alloc_can_err_skb(priv->ndev, cf); + skb = alloc_can_err_skb(priv->ndev, cf); + if (skb) + mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); + + return skb; } static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) @@ -1760,6 +1780,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) if (!cf) return 0; + mcp251xfd_skb_set_timestamp(priv, skb, timestamp); err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -2277,6 +2298,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) out_fail: netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", err, priv->regs_status.intf); + mcp251xfd_dump(priv); mcp251xfd_chip_interrupts_disable(priv); return handled; @@ -2433,6 +2455,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, struct mcp251xfd_priv *priv = netdev_priv(ndev); struct mcp251xfd_tx_ring *tx_ring = priv->tx; struct mcp251xfd_tx_obj *tx_obj; + unsigned int frame_len; u8 tx_head; int err; @@ -2451,7 +2474,10 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, if (mcp251xfd_get_tx_free(tx_ring) == 0) netif_stop_queue(ndev); - can_put_echo_skb(skb, ndev, tx_head, 0); + frame_len = can_skb_get_frame_len(skb); + err = can_put_echo_skb(skb, ndev, tx_head, frame_len); + if (!err) + netdev_sent_queue(priv->ndev, frame_len); err = mcp251xfd_tx_obj_write(priv, tx_obj); if (err) @@ -2493,6 +2519,7 @@ static int mcp251xfd_open(struct net_device *ndev) if (err) goto out_transceiver_disable; + mcp251xfd_timestamp_init(priv); can_rx_offload_enable(&priv->offload); err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, @@ -2513,6 +2540,7 @@ static int mcp251xfd_open(struct net_device *ndev) free_irq(spi->irq, priv); out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); + mcp251xfd_timestamp_stop(priv); out_transceiver_disable: mcp251xfd_transceiver_disable(priv); out_mcp251xfd_ring_free: @@ -2534,6 +2562,7 @@ static int mcp251xfd_stop(struct net_device *ndev) mcp251xfd_chip_interrupts_disable(priv); free_irq(ndev->irq, priv); can_rx_offload_disable(&priv->offload); + mcp251xfd_timestamp_stop(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); mcp251xfd_transceiver_disable(priv); mcp251xfd_ring_free(priv); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c new file mode 100644 index 000000000000..ffae8fdd3af0 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2020, 2021 Pengutronix, +// Marc Kleine-Budde 
<kernel@pengutronix.de> +// Copyright (C) 2015-2018 Etnaviv Project +// + +#include <linux/devcoredump.h> + +#include "mcp251xfd.h" +#include "mcp251xfd-dump.h" + +struct mcp251xfd_dump_iter { + void *start; + struct mcp251xfd_dump_object_header *hdr; + void *data; +}; + +struct mcp251xfd_dump_reg_space { + u16 base; + u16 size; +}; + +struct mcp251xfd_dump_ring { + enum mcp251xfd_dump_object_ring_key key; + u32 val; +}; + +static const struct mcp251xfd_dump_reg_space mcp251xfd_dump_reg_space[] = { + { + .base = MCP251XFD_REG_CON, + .size = MCP251XFD_REG_FLTOBJ(32) - MCP251XFD_REG_CON, + }, { + .base = MCP251XFD_RAM_START, + .size = MCP251XFD_RAM_SIZE, + }, { + .base = MCP251XFD_REG_OSC, + .size = MCP251XFD_REG_DEVID - MCP251XFD_REG_OSC, + }, +}; + +static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter, + enum mcp251xfd_dump_object_type object_type, + const void *data_end) +{ + struct mcp251xfd_dump_object_header *hdr = iter->hdr; + unsigned int len; + + len = data_end - iter->data; + if (!len) + return; + + hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); + hdr->type = cpu_to_le32(object_type); + hdr->offset = cpu_to_le32(iter->data - iter->start); + hdr->len = cpu_to_le32(len); + + iter->hdr++; + iter->data += len; +} + +static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_rx); + struct mcp251xfd_dump_object_reg *reg = iter->data; + unsigned int i, j; + int err; + + for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) { + const struct mcp251xfd_dump_reg_space *reg_space; + void *buf; + + reg_space = &mcp251xfd_dump_reg_space[i]; + + buf = kmalloc(reg_space->size, GFP_KERNEL); + if (!buf) + goto out; + + err = regmap_bulk_read(priv->map_reg, reg_space->base, + buf, reg_space->size / val_bytes); + if (err) { + kfree(buf); + continue; + } + + for (j = 0; j < reg_space->size; j += sizeof(u32), reg++) { + reg->reg = cpu_to_le32(reg_space->base + j); + reg->val = cpu_to_le32p(buf + j); + } + + kfree(buf); + } + + out: + mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg); +} + +static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter, + enum mcp251xfd_dump_object_type object_type, + const struct mcp251xfd_dump_ring *dump_ring, + unsigned int len) +{ + struct mcp251xfd_dump_object_reg *reg = iter->data; + unsigned int i; + + for (i = 0; i < len; i++, reg++) { + reg->reg = cpu_to_le32(dump_ring[i].key); + reg->val = cpu_to_le32(dump_ring[i].val); + } + + mcp251xfd_dump_header(iter, object_type, reg); +} + +static void mcp251xfd_dump_tef_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const struct mcp251xfd_tef_ring *tef = priv->tef; + const struct mcp251xfd_tx_ring *tx = priv->tx; + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + .val = tef->head, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + .val = tef->tail, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + .val = tx->obj_num, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + .val = sizeof(struct mcp251xfd_hw_tef_obj), + }, + }; + + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TEF, + dump_ring, ARRAY_SIZE(dump_ring)); +} + +static void mcp251xfd_dump_rx_ring_one(const struct 
mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter, + const struct mcp251xfd_rx_ring *rx) +{ + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + .val = rx->head, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + .val = rx->tail, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + .val = rx->base, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + .val = rx->nr, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + .val = rx->fifo_nr, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + .val = rx->obj_num, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + .val = rx->obj_size, + }, + }; + + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_RX, + dump_ring, ARRAY_SIZE(dump_ring)); +} + +static void mcp251xfd_dump_rx_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + struct mcp251xfd_rx_ring *rx_ring; + unsigned int i; + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) + mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring); +} + +static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const struct mcp251xfd_tx_ring *tx = priv->tx; + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + .val = tx->head, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + .val = tx->tail, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + .val = tx->base, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + .val = MCP251XFD_TX_FIFO, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + .val = tx->obj_num, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + .val = tx->obj_size, + }, + }; + + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TX, + dump_ring, ARRAY_SIZE(dump_ring)); +} + +static void mcp251xfd_dump_end(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + struct mcp251xfd_dump_object_header *hdr = iter->hdr; + + hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); + hdr->type = cpu_to_le32(MCP251XFD_DUMP_OBJECT_TYPE_END); + hdr->offset = cpu_to_le32(0); + hdr->len = cpu_to_le32(0); + + /* provoke NULL pointer access, if used after END object */ + iter->hdr = NULL; +} + +void mcp251xfd_dump(const struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_dump_iter iter; + unsigned int rings_num, obj_num; + unsigned int file_size = 0; + unsigned int i; + + /* register space + end marker */ + obj_num = 2; + + /* register space */ + for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) + file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) * + sizeof(struct mcp251xfd_dump_object_reg); + + /* TEF ring, RX ring, TX rings */ + rings_num = 1 + priv->rx_ring_num + 1; + obj_num += rings_num; + file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX * + sizeof(struct mcp251xfd_dump_object_reg); + + /* size of the headers */ + file_size += sizeof(*iter.hdr) * obj_num; + + /* allocate the file in vmalloc memory, it's likely to be big */ + iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | + __GFP_ZERO | __GFP_NORETRY); + if (!iter.start) { + netdev_warn(priv->ndev, "Failed to allocate devcoredump file.\n"); + return; + } + + /* point the data member after the headers */ + iter.hdr = iter.start; + iter.data = &iter.hdr[obj_num]; + + mcp251xfd_dump_registers(priv, &iter); + mcp251xfd_dump_tef_ring(priv, &iter); + mcp251xfd_dump_rx_ring(priv, &iter); + mcp251xfd_dump_tx_ring(priv, 
&iter); + mcp251xfd_dump_end(priv, &iter); + + dev_coredumpv(&priv->spi->dev, iter.start, + iter.data - iter.start, GFP_KERNEL); +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h new file mode 100644 index 000000000000..e7560b0712eb --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * mcp251xfd - Microchip MCP251xFD Family CAN controller driver + * + * Copyright (c) 2019, 2020, 2021 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _MCP251XFD_DUMP_H +#define _MCP251XFD_DUMP_H + +#define MCP251XFD_DUMP_MAGIC 0x1825434d + +enum mcp251xfd_dump_object_type { + MCP251XFD_DUMP_OBJECT_TYPE_REG, + MCP251XFD_DUMP_OBJECT_TYPE_TEF, + MCP251XFD_DUMP_OBJECT_TYPE_RX, + MCP251XFD_DUMP_OBJECT_TYPE_TX, + MCP251XFD_DUMP_OBJECT_TYPE_END = -1, +}; + +enum mcp251xfd_dump_object_ring_key { + MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX, +}; + +struct mcp251xfd_dump_object_header { + __le32 magic; + __le32 type; + __le32 offset; + __le32 len; +}; + +struct mcp251xfd_dump_object_reg { + __le32 reg; + __le32 val; +}; + +#endif diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 314f868b3465..297491516a26 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -233,12 +233,30 @@ mcp251xfd_regmap_crc_write(void *context, } static int +mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const buf_rx, + const struct mcp251xfd_map_buf_crc * const buf_tx, + unsigned int data_len) +{ + u16 crc_received, crc_calculated; + + crc_received = get_unaligned_be16(buf_rx->data + data_len); + crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd, + sizeof(buf_tx->cmd), + buf_rx->data, + data_len); + if (crc_received != crc_calculated) + return -EBADMSG; + + return 0; +} + + +static int mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv, struct spi_message *msg, unsigned int data_len) { const struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx; const struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; - u16 crc_received, crc_calculated; int err; BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8)); @@ -248,15 +266,7 @@ mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv, if (err) return err; - crc_received = get_unaligned_be16(buf_rx->data + data_len); - crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd, - sizeof(buf_tx->cmd), - buf_rx->data, - data_len); - if (crc_received != crc_calculated) - return -EBADMSG; - - return 0; + return mcp251xfd_regmap_crc_read_check_crc(buf_rx, buf_tx, data_len); } static int @@ -311,6 +321,40 @@ mcp251xfd_regmap_crc_read(void *context, if (err != -EBADMSG) return err; + /* MCP251XFD_REG_TBC is the time base counter + * register. It increments once per SYS clock tick, + * which is 20 or 40 MHz. + * + * Observation shows that if the lowest byte (which is + * transferred first on the SPI bus) of that register + * is 0x00 or 0x80 the calculated CRC doesn't always + * match the transferred one. 
+ * + * If the highest bit in the lowest byte is flipped + * the transferred CRC matches the calculated one. We + * assume for now the CRC calculation in the chip + * works on wrong data and the transferred data is + * correct. + */ + if (reg == MCP251XFD_REG_TBC && + (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) { + /* Flip highest bit in lowest byte of le32 */ + buf_rx->data[0] ^= 0x80; + + /* re-check CRC */ + err = mcp251xfd_regmap_crc_read_check_crc(buf_rx, + buf_tx, + val_len); + if (!err) { + /* If CRC is now correct, assume + * transferred data was OK, flip bit + * back to original value. + */ + buf_rx->data[0] ^= 0x80; + goto out; + } + } + /* MCP251XFD_REG_OSC is the first ever reg we read from. * * The chip may be in deep sleep and this SPI transfer diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c new file mode 100644 index 000000000000..ed3169274d24 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <linux/clocksource.h> +#include <linux/workqueue.h> + +#include "mcp251xfd.h" + +static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) +{ + struct mcp251xfd_priv *priv; + u32 timestamp = 0; + int err; + + priv = container_of(cc, struct mcp251xfd_priv, cc); + err = mcp251xfd_get_timestamp(priv, ×tamp); + if (err) + netdev_err(priv->ndev, + "Error %d while reading timestamp. HW timestamps may be inaccurate.", + err); + + return timestamp; +} + +static void mcp251xfd_timestamp_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct mcp251xfd_priv *priv; + + priv = container_of(delayed_work, struct mcp251xfd_priv, timestamp); + timecounter_read(&priv->tc); + + schedule_delayed_work(&priv->timestamp, + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 timestamp) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, timestamp); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) +{ + struct cyclecounter *cc = &priv->cc; + + cc->read = mcp251xfd_timestamp_read; + cc->mask = CYCLECOUNTER_MASK(32); + cc->shift = 1; + cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); + + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); + + INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work); + schedule_delayed_work(&priv->timestamp, + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv) +{ + cancel_delayed_work_sync(&priv->timestamp); +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 480bd4480bdf..1002f3902ad2 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -15,9 +15,12 @@ #include <linux/can/rx-offload.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> +#include <linux/netdevice.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> +#include <linux/timecounter.h> +#include <linux/workqueue.h> /* MPC251x registers */ @@ -394,6 +397,9 @@ #define MCP251XFD_SYSCLOCK_HZ_MAX 40000000 
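/* Rough arithmetic behind MCP251XFD_TIMESTAMP_WORK_DELAY_SEC and the
 * static_assert added below: the TBC register is a free-running 32 bit
 * counter clocked from the SYS clock, so at the maximum of 40 MHz it
 * wraps after 2^32 / 40 MHz ~ 107 s. Reading it at least once per half
 * wrap period (~53 s) lets the timecounter detect the wrap, and the
 * 45 s delayed work in mcp251xfd-timestamp.c stays safely below that.
 */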
#define MCP251XFD_SYSCLOCK_HZ_MIN 1000000 #define MCP251XFD_SPICLOCK_HZ_MAX 20000000 +#define MCP251XFD_TIMESTAMP_WORK_DELAY_SEC 45 +static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC < + CYCLECOUNTER_MASK(32) / MCP251XFD_SYSCLOCK_HZ_MAX / 2); #define MCP251XFD_OSC_PLL_MULTIPLIER 10 #define MCP251XFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC) #define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US) @@ -595,6 +601,10 @@ struct mcp251xfd_priv { struct mcp251xfd_ecc ecc; struct mcp251xfd_regs_status regs_status; + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work timestamp; + struct gpio_desc *rx_int; struct clk *clk; struct regulator *reg_vdd; @@ -727,6 +737,12 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv, return data; } +static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, + u32 *timestamp) +{ + return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); +} + static inline u16 mcp251xfd_get_tef_obj_addr(u8 n) { return MCP251XFD_RAM_START + @@ -837,5 +853,17 @@ int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv); u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, const void *data, size_t data_size); u16 mcp251xfd_crc16_compute(const void *data, size_t data_size); +void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 timestamp); +void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); +void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); + +#if IS_ENABLED(CONFIG_DEV_COREDUMP) +void mcp251xfd_dump(const struct mcp251xfd_priv *priv); +#else +static inline void mcp251xfd_dump(const struct mcp251xfd_priv *priv) +{ +} +#endif #endif diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 538f4d9adb91..3deb9f1cd292 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -20,6 +20,16 @@ config CAN_ESD_USB2 This driver supports the CAN-USB/2 interface from esd electronic system design gmbh (http://www.esd.eu). +config CAN_ETAS_ES58X + tristate "ETAS ES58X CAN/USB interfaces" + select CRC16 + help + This driver supports the ES581.4, ES582.1 and ES584.1 interfaces + from ETAS GmbH (https://www.etas.com/en/products/es58x.php). + + To compile this driver as a module, choose M here: the module + will be called etas_es58x. 
+ config CAN_GS_USB tristate "Geschwister Schneider UG interfaces" help diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index aa0f17c0b2ed..748cf31a0d53 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o +obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/ obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 18f40eb20360..5af69787d9d5 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -807,7 +807,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 562acbf454fd..65b58f8fc328 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -360,7 +360,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv, can_get_echo_skb(netdev, context->echo_index, NULL); } else { stats->tx_errors++; - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); } /* Release context */ @@ -793,7 +793,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); atomic_dec(&priv->active_tx_jobs); usb_unanchor_urb(urb); diff --git a/drivers/net/can/usb/etas_es58x/Makefile b/drivers/net/can/usb/etas_es58x/Makefile new file mode 100644 index 000000000000..a129b4aa0215 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x.o +etas_es58x-y = es58x_core.o es581_4.o es58x_fd.o diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c new file mode 100644 index 000000000000..1985f772fc3c --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es581_4.c: Adds support to ETAS ES581.4. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" +#include "es581_4.h" + +/** + * es581_4_sizeof_rx_tx_msg() - Calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc field. + * + * Even if RTR frames have actually no payload, the ES58X devices + * still expect it. Must be a macro in order to accept several types + * (struct es581_4_tx_can_msg and struct es581_4_rx_can_msg) as an + * input. + * + * Return: length of the message. 
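+ *
+ * For instance (sketch): a classical CAN message with (msg).dlc == 8
+ * makes can_cc_dlc2len() return 8, so the macro expands to
+ * offsetof(typeof(msg), data[8]), i.e. the fixed part of the
+ * structure plus an 8 byte payload.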
+ */ +#define es581_4_sizeof_rx_tx_msg(msg) \ + offsetof(typeof(msg), data[can_cc_dlc2len((msg).dlc)]) + +static u16 es581_4_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es581_4_urb_cmd.msg_len); +} + +static int es581_4_echo_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_bulk_echo_msg *bulk_echo_msg; + const struct es581_4_echo_msg *echo_msg; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len; + u32 first_packet_idx, packet_idx; + unsigned int dropped = 0; + int i, num_element, ret; + + bulk_echo_msg = &es581_4_urb_cmd->bulk_echo_msg; + msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len) - + sizeof(bulk_echo_msg->channel_no); + num_element = es58x_msg_num_element(es58x_dev->dev, + bulk_echo_msg->echo_msg, msg_len); + if (num_element <= 0) + return num_element; + + ret = es58x_get_netdev(es58x_dev, bulk_echo_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + echo_msg = &bulk_echo_msg->echo_msg[0]; + first_packet_idx = get_unaligned_le32(&echo_msg->packet_idx); + packet_idx = first_packet_idx; + for (i = 0; i < num_element; i++) { + u32 tmp_idx; + + echo_msg = &bulk_echo_msg->echo_msg[i]; + tmp_idx = get_unaligned_le32(&echo_msg->packet_idx); + if (tmp_idx == packet_idx - 1) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received echo packet idx %u twice\n", + packet_idx - 1); + dropped++; + continue; + } + if (tmp_idx != packet_idx) { + netdev_err(netdev, "Echo packet idx jumped from %u to %u\n", + packet_idx - 1, echo_msg->packet_idx); + return -EBADMSG; + } + + tstamps[i] = get_unaligned_le64(&echo_msg->timestamp); + packet_idx++; + } + + netdev->stats.tx_dropped += dropped; + return es58x_can_get_echo_skb(netdev, first_packet_idx, + tstamps, num_element - dropped); +} + +static int es581_4_rx_can_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + u16 msg_len) +{ + const struct device *dev = es58x_dev->dev; + struct net_device *netdev; + int pkts, num_element, channel_no, ret; + + num_element = es58x_msg_num_element(dev, es581_4_urb_cmd->rx_can_msg, + msg_len); + if (num_element <= 0) + return num_element; + + channel_no = es581_4_urb_cmd->rx_can_msg[0].channel_no; + ret = es58x_get_netdev(es58x_dev, channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets\n", + __func__, netdev->name, num_element); + netdev->stats.rx_dropped += num_element; + return 0; + } + + for (pkts = 0; pkts < num_element; pkts++) { + const struct es581_4_rx_can_msg *rx_can_msg = + &es581_4_urb_cmd->rx_can_msg[pkts]; + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + + if (channel_no != rx_can_msg->channel_no) + return -EBADMSG; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, + rx_can_msg->dlc); + if (ret) + break; + } + + return ret; +} + +static int es581_4_rx_err_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_err_msg *rx_err_msg) +{ + struct net_device *netdev; + enum es58x_err error = get_unaligned_le32(&rx_err_msg->error); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_err_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, error, 0, + 
get_unaligned_le64(&rx_err_msg->timestamp)); +} + +static int es581_4_rx_event_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_event_msg *rx_event_msg) +{ + struct net_device *netdev; + enum es58x_event event = get_unaligned_le32(&rx_event_msg->event); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_event_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, 0, event, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es581_4_rx_cmd_ret_u32(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + enum es58x_ret_type ret_type) +{ + struct net_device *netdev; + const struct es581_4_rx_cmd_ret *rx_cmd_ret; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es581_4_urb_cmd->rx_cmd_ret, msg_len); + if (ret) + return ret; + + rx_cmd_ret = &es581_4_urb_cmd->rx_cmd_ret; + + ret = es58x_get_netdev(es58x_dev, rx_cmd_ret->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, ret_type, + get_unaligned_le32(&rx_cmd_ret->rx_cmd_ret_le32)); +} + +static int es581_4_tx_ack_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es581_4_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + if (tx_ack_msg->rx_cmd_ret_u8 != ES58X_RET_U8_OK) + return es58x_rx_cmd_ret_u8(es58x_dev->dev, + ES58X_RET_TYPE_TX_MSG, + tx_ack_msg->rx_cmd_ret_u8); + + ret = es58x_get_netdev(es58x_dev, tx_ack_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + ES58X_RET_U32_OK); +} + +static int es581_4_dispatch_rx_cmd(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + const struct device *dev = es58x_dev->dev; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + enum es581_4_rx_type rx_type = es581_4_urb_cmd->rx_can_msg[0].rx_type; + int ret = 0; + + switch (rx_type) { + case ES581_4_RX_TYPE_MESSAGE: + return es581_4_rx_can_msg(es58x_dev, es581_4_urb_cmd, msg_len); + + case ES581_4_RX_TYPE_ERROR: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_err_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_err_msg(es58x_dev, + &es581_4_urb_cmd->rx_err_msg); + + case ES581_4_RX_TYPE_EVENT: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_event_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_event_msg(es58x_dev, + &es581_4_urb_cmd->rx_event_msg); + + default: + dev_err(dev, "%s: Unknown rx_type 0x%02X\n", __func__, rx_type); + return -EBADRQC; + } +} + +static int es581_4_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es581_4_urb_cmd *es581_4_urb_cmd; + struct device *dev = es58x_dev->dev; + u16 msg_len = es581_4_get_msg_len(urb_cmd); + int ret; + + es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + if (es581_4_urb_cmd->cmd_type != ES581_4_CAN_COMMAND_TYPE) { + dev_err(dev, "%s: Unknown command type (0x%02X)\n", + __func__, es581_4_urb_cmd->cmd_type); + return -EBADRQC; + } + + switch ((enum es581_4_cmd_id)es581_4_urb_cmd->cmd_id) { + case ES581_4_CMD_ID_SET_BITTIMING: + 
return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_SET_BITTIMING); + + case ES581_4_CMD_ID_ENABLE_CHANNEL: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_ENABLE_CHANNEL); + + case ES581_4_CMD_ID_TX_MSG: + return es581_4_tx_ack_msg(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_RX_MSG: + return es581_4_dispatch_rx_cmd(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_RESET_RX: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_RESET_RX); + return ret; + + case ES581_4_CMD_ID_RESET_TX: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_RESET_TX); + return ret; + + case ES581_4_CMD_ID_DISABLE_CHANNEL: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_DISABLE_CHANNEL); + + case ES581_4_CMD_ID_TIMESTAMP: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->timestamp, + msg_len); + if (ret < 0) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es581_4_urb_cmd->timestamp)); + return 0; + + case ES581_4_CMD_ID_ECHO: + return es581_4_echo_msg(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_DEVICE_ERR: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_cmd_ret_u8, + msg_len); + if (ret) + return ret; + return es58x_rx_cmd_ret_u8(dev, ES58X_RET_TYPE_DEVICE_ERR, + es581_4_urb_cmd->rx_cmd_ret_u8); + + default: + dev_warn(dev, "%s: Unexpected command ID: 0x%02X\n", + __func__, es581_4_urb_cmd->cmd_id); + return -EBADRQC; + } +} + +static void es581_4_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + es581_4_urb_cmd->SOF = cpu_to_le16(es581_4_param.tx_start_of_frame); + es581_4_urb_cmd->cmd_type = cmd_type; + es581_4_urb_cmd->cmd_id = cmd_id; + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es581_4_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es581_4_tx_can_msg *tx_can_msg; + u16 msg_len; + int ret; + + if (can_is_canfd_skb(skb)) + return -EMSGSIZE; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */ + es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_TX_MSG, + priv->channel_idx, msg_len); + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg = 0; + } else { + msg_len = es581_4_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es581_4_urb_cmd->bulk_tx_can_msg, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* Fill message contents. */ + tx_can_msg = (struct es581_4_tx_can_msg *) + &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1]; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx); + put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags); + tx_can_msg->channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* Calculate new sizes. 
*/ + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg++; + msg_len += es581_4_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); + + return 0; +} + +static int es581_4_set_bittiming(struct es58x_priv *priv) +{ + struct es581_4_tx_conf_msg tx_conf_msg = { 0 }; + struct can_bittiming *bt = &priv->can.bittiming; + + tx_conf_msg.bitrate = cpu_to_le32(bt->bitrate); + /* bt->sample_point is in tenth of percent. Convert it to percent. */ + tx_conf_msg.sample_point = cpu_to_le32(bt->sample_point / 10U); + tx_conf_msg.samples_per_bit = cpu_to_le32(ES58X_SAMPLES_PER_BIT_ONE); + tx_conf_msg.bit_time = cpu_to_le32(can_bit_time(bt)); + tx_conf_msg.sjw = cpu_to_le32(bt->sjw); + tx_conf_msg.sync_edge = cpu_to_le32(ES58X_SYNC_EDGE_SINGLE); + tx_conf_msg.physical_layer = + cpu_to_le32(ES58X_PHYSICAL_LAYER_HIGH_SPEED); + tx_conf_msg.echo_mode = cpu_to_le32(ES58X_ECHO_ON); + tx_conf_msg.channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_SET_BITTIMING, &tx_conf_msg, + sizeof(tx_conf_msg), priv->channel_idx); +} + +static int es581_4_enable_channel(struct es58x_priv *priv) +{ + int ret; + u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + ret = es581_4_set_bittiming(priv); + if (ret) + return ret; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_ENABLE_CHANNEL, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_disable_channel(struct es58x_priv *priv) +{ + u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_DISABLE_CHANNEL, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_reset_device(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_RESET_DEVICE, + ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); +} + +static int es581_4_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_TIMESTAMP, + ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); +} + +/* Nominal bittiming constants for ES581.4 as specified in the + * microcontroller datasheet: "Stellaris(R) LM3S5B91 Microcontroller" + * table 17-4 "CAN Protocol Ranges" from Texas Instruments. + */ +static const struct can_bittiming_const es581_4_bittiming_const = { + .name = "ES581.4", + .tseg1_min = 1, + .tseg1_max = 8, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 128, + .brp_inc = 1 +}; + +const struct es58x_parameters es581_4_param = { + .bittiming_const = &es581_4_bittiming_const, + .data_bittiming_const = NULL, + .tdc_const = NULL, + .bitrate_max = 1 * CAN_MBPS, + .clock = {.freq = 50 * CAN_MHZ}, + .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC, + .tx_start_of_frame = 0xAFAF, + .rx_start_of_frame = 0xFAFA, + .tx_urb_cmd_max_len = ES581_4_TX_URB_CMD_MAX_LEN, + .rx_urb_cmd_max_len = ES581_4_RX_URB_CMD_MAX_LEN, + /* Size of internal device TX queue is 330. + * + * However, we witnessed some ES58X_ERR_PROT_CRC errors from + * the device and thus, echo_skb_max was lowered to the + * empirical value of 75 which seems stable and then rounded + * down to become a power of two. + * + * Root cause of those ES58X_ERR_PROT_CRC errors is still + * unclear. 
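+ *
+ * The power of two matters because the driver addresses the echo
+ * FIFO with (index & fifo_mask), see es58x_core.c: with
+ * fifo_mask = 63 the mask behaves like a modulo by 64 only because
+ * 64 is a power of two.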
+ */ + .fifo_mask = 63, /* echo_skb_max = 64 */ + .dql_min_limit = CAN_FRAME_LEN_MAX * 50, /* Empirical value. */ + .tx_bulk_max = ES581_4_TX_BULK_MAX, + .urb_cmd_header_len = ES581_4_URB_CMD_HEADER_LEN, + .rx_urb_max = ES58X_RX_URBS_MAX, + .tx_urb_max = ES58X_TX_URBS_MAX +}; + +const struct es58x_operators es581_4_ops = { + .get_msg_len = es581_4_get_msg_len, + .handle_urb_cmd = es581_4_handle_urb_cmd, + .fill_urb_header = es581_4_fill_urb_header, + .tx_can_msg = es581_4_tx_can_msg, + .enable_channel = es581_4_enable_channel, + .disable_channel = es581_4_disable_channel, + .reset_device = es581_4_reset_device, + .get_timestamp = es581_4_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h new file mode 100644 index 000000000000..4bc60a6df697 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es581_4.h: Definitions and declarations specific to ETAS + * ES581.4. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES581_4_H__ +#define __ES581_4_H__ + +#include <linux/types.h> + +#define ES581_4_NUM_CAN_CH 2 +#define ES581_4_CHANNEL_IDX_OFFSET 1 + +#define ES581_4_TX_BULK_MAX 25 +#define ES581_4_RX_BULK_MAX 30 +#define ES581_4_ECHO_BULK_MAX 30 + +enum es581_4_cmd_type { + ES581_4_CAN_COMMAND_TYPE = 0x45 +}; + +enum es581_4_cmd_id { + ES581_4_CMD_ID_OPEN_CHANNEL = 0x01, + ES581_4_CMD_ID_CLOSE_CHANNEL = 0x02, + ES581_4_CMD_ID_SET_BITTIMING = 0x03, + ES581_4_CMD_ID_ENABLE_CHANNEL = 0x04, + ES581_4_CMD_ID_TX_MSG = 0x05, + ES581_4_CMD_ID_RX_MSG = 0x06, + ES581_4_CMD_ID_RESET_RX = 0x0A, + ES581_4_CMD_ID_RESET_TX = 0x0B, + ES581_4_CMD_ID_DISABLE_CHANNEL = 0x0C, + ES581_4_CMD_ID_TIMESTAMP = 0x0E, + ES581_4_CMD_ID_RESET_DEVICE = 0x28, + ES581_4_CMD_ID_ECHO = 0x71, + ES581_4_CMD_ID_DEVICE_ERR = 0x72 +}; + +enum es581_4_rx_type { + ES581_4_RX_TYPE_MESSAGE = 1, + ES581_4_RX_TYPE_ERROR = 3, + ES581_4_RX_TYPE_EVENT = 4 +}; + +/** + * struct es581_4_tx_conf_msg - Channel configuration. + * @bitrate: Bitrate. + * @sample_point: Sample point is in percent [0..100]. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @bit_time: Number of time quanta in one bit. + * @sjw: Synchronization Jump Width. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @channel_no: Channel number, starting from 1. Not to be confused + * with channed_idx of the ES58X FD which starts from 0. + */ +struct es581_4_tx_conf_msg { + __le32 bitrate; + __le32 sample_point; + __le32 samples_per_bit; + __le32 bit_time; + __le32 sjw; + __le32 sync_edge; + __le32 physical_layer; + __le32 echo_mode; + u8 channel_no; +} __packed; + +struct es581_4_tx_can_msg { + __le32 can_id; + __le32 packet_idx; + __le16 flags; + u8 channel_no; + u8 dlc; + u8 data[CAN_MAX_DLEN]; +} __packed; + +/* The ES581.4 allows bulk transfer. */ +struct es581_4_bulk_tx_can_msg { + u8 num_can_msg; + /* Using type "u8[]" instead of "struct es581_4_tx_can_msg[]" + * for tx_msg_buf because each member has a flexible size. 
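+ *
+ * Each message actually occupies only es581_4_sizeof_rx_tx_msg()
+ * bytes (header plus a dlc dependent payload) and consecutive
+ * messages are packed back to back, so indexing a struct array would
+ * not match the buffer layout; the array is merely sized for the
+ * worst case of ES581_4_TX_BULK_MAX full sized messages.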
+ */ + u8 tx_can_msg_buf[ES581_4_TX_BULK_MAX * + sizeof(struct es581_4_tx_can_msg)]; +} __packed; + +struct es581_4_echo_msg { + __le64 timestamp; + __le32 packet_idx; +} __packed; + +struct es581_4_bulk_echo_msg { + u8 channel_no; + struct es581_4_echo_msg echo_msg[ES581_4_ECHO_BULK_MAX]; +} __packed; + +/* Normal Rx CAN Message */ +struct es581_4_rx_can_msg { + __le64 timestamp; + u8 rx_type; /* type enum es581_4_rx_type */ + u8 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 dlc; + __le32 can_id; + u8 data[CAN_MAX_DLEN]; +} __packed; + +struct es581_4_rx_err_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + __le16 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 __padding[2]; + u8 dlc; + __le32 tag; /* Related to the CAN filtering. Unused in this module */ + __le32 can_id; + __le32 error; /* type enum es58x_error */ + __le32 destination; /* Unused in this module */ +} __packed; + +struct es581_4_rx_event_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + u8 channel_no; + u8 __padding; + __le32 tag; /* Related to the CAN filtering. Unused in this module */ + __le32 event; /* type enum es58x_event */ + __le32 destination; /* Unused in this module */ +} __packed; + +struct es581_4_tx_ack_msg { + __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */ + u8 channel_no; + u8 rx_cmd_ret_u8; /* type enum es58x_cmd_ret_code_u8 */ +} __packed; + +struct es581_4_rx_cmd_ret { + __le32 rx_cmd_ret_le32; + u8 channel_no; + u8 __padding[3]; +} __packed; + +/** + * struct es581_4_urb_cmd - Commands received from or sent to the + * ES581.4 device. + * @SOF: Start of Frame. + * @cmd_type: Command Type (type: enum es581_4_cmd_type). The CRC + * calculation starts at this position. + * @cmd_id: Command ID (type: enum es581_4_cmd_id). + * @msg_len: Length of the message, excluding CRC (i.e. length of the + * union). + * @tx_conf_msg: Channel configuration. + * @bulk_tx_can_msg: Tx messages. + * @rx_can_msg: Array of Rx messages. + * @bulk_echo_msg: Tx message being looped back. + * @rx_err_msg: Error message. + * @rx_event_msg: Event message. + * @tx_ack_msg: Tx acknowledgment message. + * @rx_cmd_ret: Command return code. + * @timestamp: Timestamp reply. + * @rx_cmd_ret_u8: Rx 8 bits return code (type: enum + * es58x_cmd_ret_code_u8). + * @raw_msg: Message raw payload. + * @reserved_for_crc16_do_not_use: The structure ends with a + * CRC16. Because the structures in above union are of variable + * lengths, we can not predict the offset of the CRC in + * advance. Use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. 
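+ *
+ * Worked example (illustrative): the header up to raw_msg, i.e.
+ * ES581_4_URB_CMD_HEADER_LEN, is 6 bytes (SOF 2 + cmd_type 1 +
+ * cmd_id 1 + msg_len 2), so for a message of length msg_len the
+ * CRC16 sits at byte offset 6 + msg_len, which is what
+ * es58x_get_crc() derives from the total URB length.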
+ */ +struct es581_4_urb_cmd { + __le16 SOF; + u8 cmd_type; + u8 cmd_id; + __le16 msg_len; + + union { + struct es581_4_tx_conf_msg tx_conf_msg; + struct es581_4_bulk_tx_can_msg bulk_tx_can_msg; + struct es581_4_rx_can_msg rx_can_msg[ES581_4_RX_BULK_MAX]; + struct es581_4_bulk_echo_msg bulk_echo_msg; + struct es581_4_rx_err_msg rx_err_msg; + struct es581_4_rx_event_msg rx_event_msg; + struct es581_4_tx_ack_msg tx_ack_msg; + struct es581_4_rx_cmd_ret rx_cmd_ret; + __le64 timestamp; + u8 rx_cmd_ret_u8; + u8 raw_msg[0]; + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define ES581_4_URB_CMD_HEADER_LEN (offsetof(struct es581_4_urb_cmd, raw_msg)) +#define ES581_4_TX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, bulk_tx_can_msg) +#define ES581_4_RX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, rx_can_msg) + +#endif /* __ES581_4_H__ */ diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c new file mode 100644 index 000000000000..8e9102482c52 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -0,0 +1,2301 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_core.c: Core logic to manage the network devices and the + * USB interface. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/crc16.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" + +#define DRV_VERSION "1.00" +MODULE_AUTHOR("Mailhol Vincent <mailhol.vincent@wanadoo.fr>"); +MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>"); +MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL v2"); + +#define ES58X_MODULE_NAME "etas_es58x" +#define ES58X_VENDOR_ID 0x108C +#define ES581_4_PRODUCT_ID 0x0159 +#define ES582_1_PRODUCT_ID 0x0168 +#define ES584_1_PRODUCT_ID 0x0169 + +/* ES58X FD has some interface protocols unsupported by this driver. */ +#define ES58X_FD_INTERFACE_PROTOCOL 0 + +/* Table of devices which work with this driver. */ +static const struct usb_device_id es58x_id_table[] = { + { + /* ETAS GmbH ES581.4 USB dual-channel CAN Bus Interface module. */ + USB_DEVICE(ES58X_VENDOR_ID, ES581_4_PRODUCT_ID), + .driver_info = ES58X_DUAL_CHANNEL + }, { + /* ETAS GmbH ES582.1 USB dual-channel CAN FD Bus Interface module. */ + USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES582_1_PRODUCT_ID, + ES58X_FD_INTERFACE_PROTOCOL), + .driver_info = ES58X_DUAL_CHANNEL | ES58X_FD_FAMILY + }, { + /* ETAS GmbH ES584.1 USB single-channel CAN FD Bus Interface module. */ + USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES584_1_PRODUCT_ID, + ES58X_FD_INTERFACE_PROTOCOL), + .driver_info = ES58X_FD_FAMILY + }, { + /* Terminating entry */ + } +}; + +MODULE_DEVICE_TABLE(usb, es58x_id_table); + +#define es58x_print_hex_dump(buf, len) \ + print_hex_dump(KERN_DEBUG, \ + ES58X_MODULE_NAME " " __stringify(buf) ": ", \ + DUMP_PREFIX_NONE, 16, 1, buf, len, false) + +#define es58x_print_hex_dump_debug(buf, len) \ + print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\ + DUMP_PREFIX_NONE, 16, 1, buf, len, false) + +/* The last two bytes of an ES58X command is a CRC16. 
The first two + * bytes (the start of frame) are skipped and the CRC calculation + * starts on the third byte. + */ +#define ES58X_CRC_CALC_OFFSET 2 + +/** + * es58x_calculate_crc() - Compute the crc16 of a given URB. + * @urb_cmd: The URB command for which we want to calculate the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: crc16 value. + */ +static u16 es58x_calculate_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + ssize_t len = urb_len - ES58X_CRC_CALC_OFFSET - sizeof(crc); + + crc = crc16(0, &urb_cmd->raw_cmd[ES58X_CRC_CALC_OFFSET], len); + return crc; +} + +/** + * es58x_get_crc() - Get the CRC value of a given URB. + * @urb_cmd: The URB command for which we want to get the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: crc16 value. + */ +static u16 es58x_get_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + const __le16 *crc_addr; + + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + crc = get_unaligned_le16(crc_addr); + return crc; +} + +/** + * es58x_set_crc() - Set the CRC value of a given URB. + * @urb_cmd: The URB command for which we want to get the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + */ +static void es58x_set_crc(union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + __le16 *crc_addr; + + crc = es58x_calculate_crc(urb_cmd, urb_len); + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + put_unaligned_le16(crc, crc_addr); +} + +/** + * es58x_check_crc() - Validate the CRC value of a given URB. + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command for which we want to check the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: zero on success, -EBADMSG if the CRC check fails. + */ +static int es58x_check_crc(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 calculated_crc = es58x_calculate_crc(urb_cmd, urb_len); + u16 expected_crc = es58x_get_crc(urb_cmd, urb_len); + + if (expected_crc != calculated_crc) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Bad CRC, urb_len: %d\n", + __func__, urb_len); + return -EBADMSG; + } + + return 0; +} + +/** + * es58x_timestamp_to_ns() - Convert a timestamp value received from a + * ES58X device to nanoseconds. + * @timestamp: Timestamp received from a ES58X device. + * + * The timestamp received from ES58X is expressed in multiples of 0.5 + * micro seconds. This function converts it in to nanoseconds. + * + * Return: Timestamp value in nanoseconds. + */ +static u64 es58x_timestamp_to_ns(u64 timestamp) +{ + const u64 es58x_timestamp_ns_mult_coef = 500ULL; + + return es58x_timestamp_ns_mult_coef * timestamp; +} + +/** + * es58x_set_skb_timestamp() - Set the hardware timestamp of an skb. + * @netdev: CAN network device. + * @skb: socket buffer of a CAN message. + * @timestamp: Timestamp received from an ES58X device. + * + * Used for both received and echo messages. 
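+ *
+ * Example (illustrative values): a raw device timestamp of 2000000
+ * corresponds to 2000000 * 500 ns = 1 s; realtime_diff_ns is then
+ * added so the resulting ktime matches the kernel's wall clock.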
+ */ +static void es58x_set_skb_timestamp(struct net_device *netdev, + struct sk_buff *skb, u64 timestamp) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + struct skb_shared_hwtstamps *hwts; + + hwts = skb_hwtstamps(skb); + /* Ignoring overflow (overflow on 64 bits timestamp with nano + * second precision would occur after more than 500 years). + */ + hwts->hwtstamp = ns_to_ktime(es58x_timestamp_to_ns(timestamp) + + es58x_dev->realtime_diff_ns); +} + +/** + * es58x_rx_timestamp() - Handle a received timestamp. + * @es58x_dev: ES58X device. + * @timestamp: Timestamp received from a ES58X device. + * + * Calculate the difference between the ES58X device and the kernel + * internal clocks. This difference will be later used as an offset to + * convert the timestamps of RX and echo messages to match the kernel + * system time (e.g. convert to UNIX time). + */ +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp) +{ + u64 ktime_real_ns = ktime_get_real_ns(); + u64 device_timestamp = es58x_timestamp_to_ns(timestamp); + + dev_dbg(es58x_dev->dev, "%s: request round-trip time: %llu ns\n", + __func__, ktime_real_ns - es58x_dev->ktime_req_ns); + + es58x_dev->realtime_diff_ns = + (es58x_dev->ktime_req_ns + ktime_real_ns) / 2 - device_timestamp; + es58x_dev->ktime_req_ns = 0; + + dev_dbg(es58x_dev->dev, + "%s: Device timestamp: %llu, diff with kernel: %llu\n", + __func__, device_timestamp, es58x_dev->realtime_diff_ns); +} + +/** + * es58x_set_realtime_diff_ns() - Calculate difference between the + * clocks of the ES58X device and the kernel + * @es58x_dev: ES58X device. + * + * Request a timestamp from the ES58X device. Once the answer is + * received, the timestamp difference will be set by the callback + * function es58x_rx_timestamp(). + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_set_realtime_diff_ns(struct es58x_device *es58x_dev) +{ + if (es58x_dev->ktime_req_ns) { + dev_warn(es58x_dev->dev, + "%s: Previous request to set timestamp has not completed yet\n", + __func__); + return -EBUSY; + } + + es58x_dev->ktime_req_ns = ktime_get_real_ns(); + return es58x_dev->ops->get_timestamp(es58x_dev); +} + +/** + * es58x_is_can_state_active() - Is the network device in an active + * CAN state? + * @netdev: CAN network device. + * + * The device is considered active if it is able to send or receive + * CAN frames, that is to say if it is in any of + * CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING or + * CAN_STATE_ERROR_PASSIVE states. + * + * Caution: when recovering from a bus-off, + * net/core/dev.c#can_restart() will call + * net/core/dev.c#can_flush_echo_skb() without using any kind of + * locks. For this reason, it is critical to guarantee that no TX or + * echo operations (i.e. any access to priv->echo_skb[]) can be done + * while this function is returning false. + * + * Return: true if the device is active, else returns false. + */ +static bool es58x_is_can_state_active(struct net_device *netdev) +{ + return es58x_priv(netdev)->can.state < CAN_STATE_BUS_OFF; +} + +/** + * es58x_is_echo_skb_threshold_reached() - Determine the limit of how + * many skb slots can be taken before we should stop the network + * queue. + * @priv: ES58X private parameters related to the network device. + * + * We need to save enough free skb slots in order to be able to do + * bulk send. This function can be used to determine when to wake or + * stop the network queue in regard to the number of skb slots already + * taken if the echo FIFO. 
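+ *
+ * As a sketch with the ES581.4 numbers: echo_skb_max = 64 and
+ * tx_bulk_max = 25 give a threshold of 64 - 25 + 1 = 40, so the
+ * FIFO is considered full once 40 slots are in use, i.e. as soon as
+ * fewer than a full bulk of 25 free slots remain.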
+ * + * Return: boolean. + */ +static bool es58x_is_echo_skb_threshold_reached(struct es58x_priv *priv) +{ + u32 num_echo_skb = priv->tx_head - priv->tx_tail; + u32 threshold = priv->can.echo_skb_max - + priv->es58x_dev->param->tx_bulk_max + 1; + + return num_echo_skb >= threshold; +} + +/** + * es58x_can_free_echo_skb_tail() - Remove the oldest echo skb of the + * echo FIFO. + * @netdev: CAN network device. + * + * Naming convention: the tail is the beginning of the FIFO, i.e. the + * first skb to have entered the FIFO. + */ +static void es58x_can_free_echo_skb_tail(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + unsigned int frame_len = 0; + + can_free_echo_skb(netdev, priv->tx_tail & fifo_mask, &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + + priv->tx_tail++; + + netdev->stats.tx_dropped++; +} + +/** + * es58x_can_get_echo_skb_recovery() - Try to re-sync the echo FIFO. + * @netdev: CAN network device. + * @rcv_packet_idx: Index + * + * This function should not be called under normal circumstances. In + * the unlikely case that one or several URB packages get dropped by + * the device, the index will get out of sync. Try to recover by + * dropping the echo skb packets with older indexes. + * + * Return: zero if recovery was successful, -EINVAL otherwise. + */ +static int es58x_can_get_echo_skb_recovery(struct net_device *netdev, + u32 rcv_packet_idx) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret = 0; + + netdev->stats.tx_errors++; + + if (net_ratelimit()) + netdev_warn(netdev, + "Bad echo packet index: %u. First index: %u, end index %u, num_echo_skb: %02u/%02u\n", + rcv_packet_idx, priv->tx_tail, priv->tx_head, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + + if ((s32)(rcv_packet_idx - priv->tx_tail) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received echo index is from the past. Ignoring it\n"); + ret = -EINVAL; + } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) { + if (net_ratelimit()) + netdev_err(netdev, + "Received echo index is from the future. Ignoring it\n"); + ret = -EINVAL; + } else { + if (net_ratelimit()) + netdev_warn(netdev, + "Recovery: dropping %u echo skb from index %u to %u\n", + rcv_packet_idx - priv->tx_tail, + priv->tx_tail, rcv_packet_idx - 1); + while (priv->tx_tail != rcv_packet_idx) { + if (priv->tx_tail == priv->tx_head) + return -EINVAL; + es58x_can_free_echo_skb_tail(netdev); + } + } + return ret; +} + +/** + * es58x_can_get_echo_skb() - Get the skb from the echo FIFO and loop + * it back locally. + * @netdev: CAN network device. + * @rcv_packet_idx: Index of the first packet received from the device. + * @tstamps: Array of hardware timestamps received from a ES58X device. + * @pkts: Number of packets (and so, length of @tstamps). + * + * Callback function for when we receive a self reception + * acknowledgment. Retrieves the skb from the echo FIFO, sets its + * hardware timestamp (the actual time it was sent) and loops it back + * locally. + * + * The device has to be active (i.e. network interface UP and not in + * bus off state or restarting). + * + * Packet indexes must be consecutive (i.e. index of first packet is + * @rcv_packet_idx, index of second packet is @rcv_packet_idx + 1 and + * index of last packet is @rcv_packet_idx + @pkts - 1). + * + * Return: zero on success. 
+ */ +int es58x_can_get_echo_skb(struct net_device *netdev, u32 rcv_packet_idx, + u64 *tstamps, unsigned int pkts) +{ + struct es58x_priv *priv = es58x_priv(netdev); + unsigned int rx_total_frame_len = 0; + unsigned int num_echo_skb = priv->tx_head - priv->tx_tail; + int i; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d echo packets\n", + __func__, netdev->name, pkts); + netdev->stats.tx_dropped += pkts; + return 0; + } else if (!es58x_is_can_state_active(netdev)) { + if (net_ratelimit()) + netdev_dbg(netdev, + "Bus is off or device is restarting. Ignoring %u echo packets from index %u\n", + pkts, rcv_packet_idx); + /* stats.tx_dropped will be (or was already) + * incremented by + * drivers/net/can/net/dev.c:can_flush_echo_skb(). + */ + return 0; + } else if (num_echo_skb == 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received %u echo packets from index: %u but echo skb queue is empty.\n", + pkts, rcv_packet_idx); + netdev->stats.tx_dropped += pkts; + return 0; + } + + if (priv->tx_tail != rcv_packet_idx) { + if (es58x_can_get_echo_skb_recovery(netdev, rcv_packet_idx) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Could not find echo skb for echo packet index: %u\n", + rcv_packet_idx); + return 0; + } + } + if (num_echo_skb < pkts) { + int pkts_drop = pkts - num_echo_skb; + + if (net_ratelimit()) + netdev_err(netdev, + "Received %u echo packets but have only %d echo skb. Dropping %d echo skb\n", + pkts, num_echo_skb, pkts_drop); + netdev->stats.tx_dropped += pkts_drop; + pkts -= pkts_drop; + } + + for (i = 0; i < pkts; i++) { + unsigned int skb_idx = priv->tx_tail & fifo_mask; + struct sk_buff *skb = priv->can.echo_skb[skb_idx]; + unsigned int frame_len = 0; + + if (skb) + es58x_set_skb_timestamp(netdev, skb, tstamps[i]); + + netdev->stats.tx_bytes += can_get_echo_skb(netdev, skb_idx, + &frame_len); + rx_total_frame_len += frame_len; + + priv->tx_tail++; + } + + netdev_completed_queue(netdev, pkts, rx_total_frame_len); + netdev->stats.tx_packets += pkts; + + priv->err_passive_before_rtx_success = 0; + if (!es58x_is_echo_skb_threshold_reached(priv)) + netif_wake_queue(netdev); + + return 0; +} + +/** + * es58x_can_reset_echo_fifo() - Reset the echo FIFO. + * @netdev: CAN network device. + * + * The echo_skb array of struct can_priv will be flushed by + * drivers/net/can/dev.c:can_flush_echo_skb(). This function resets + * the parameters of the struct es58x_priv of our device and reset the + * queue (c.f. BQL). + */ +static void es58x_can_reset_echo_fifo(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + priv->tx_tail = 0; + priv->tx_head = 0; + priv->tx_urb = NULL; + priv->err_passive_before_rtx_success = 0; + netdev_reset_queue(netdev); +} + +/** + * es58x_flush_pending_tx_msg() - Reset the buffer for transmission messages. + * @netdev: CAN network device. + * + * es58x_start_xmit() will queue up to tx_bulk_max messages in + * &tx_urb buffer and do a bulk send of all messages in one single URB + * (c.f. xmit_more flag). When the device recovers from a bus off + * state or when the device stops, the tx_urb buffer might still have + * pending messages in it and thus need to be flushed. 
+ */ +static void es58x_flush_pending_tx_msg(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + + if (priv->tx_urb) { + netdev_warn(netdev, "%s: dropping %d TX messages\n", + __func__, priv->tx_can_msg_cnt); + netdev->stats.tx_dropped += priv->tx_can_msg_cnt; + while (priv->tx_can_msg_cnt > 0) { + unsigned int frame_len = 0; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + priv->tx_head--; + priv->tx_can_msg_cnt--; + can_free_echo_skb(netdev, priv->tx_head & fifo_mask, + &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + } + usb_anchor_urb(priv->tx_urb, &priv->es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); + usb_free_urb(priv->tx_urb); + } + priv->tx_urb = NULL; +} + +/** + * es58x_tx_ack_msg() - Handle acknowledgment messages. + * @netdev: CAN network device. + * @tx_free_entries: Number of free entries in the device transmit FIFO. + * @rx_cmd_ret_u32: error code as returned by the ES58X device. + * + * ES58X sends an acknowledgment message after a transmission request + * is done. This is mandatory for the ES581.4 but is optional (and + * deactivated in this driver) for the ES58X_FD family. + * + * Under normal circumstances, this function should never throw an + * error message. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + if (tx_free_entries <= priv->es58x_dev->param->tx_bulk_max) { + if (net_ratelimit()) + netdev_err(netdev, + "Only %d entries left in device queue, num_echo_skb: %d/%d\n", + tx_free_entries, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + netif_stop_queue(netdev); + } + + return es58x_rx_cmd_ret_u32(netdev, ES58X_RET_TYPE_TX_MSG, + rx_cmd_ret_u32); +} + +/** + * es58x_rx_can_msg() - Handle a received a CAN message. + * @netdev: CAN network device. + * @timestamp: Hardware time stamp (only relevant in rx branches). + * @data: CAN payload. + * @can_id: CAN ID. + * @es58x_flags: Please refer to enum es58x_flag. + * @dlc: Data Length Code (raw value). + * + * Fill up a CAN skb and post it. + * + * This function handles the case where the DLC of a classical CAN + * frame is greater than CAN_MAX_DLEN (c.f. the len8_dlc field of + * struct can_frame). + * + * Return: zero on success. 
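+ *
+ * For example (assuming the usual CAN core semantics of
+ * can_frame_set_cc_len()): a classical frame received with a raw DLC
+ * of 15 is delivered with len = 8, and the raw value is preserved in
+ * len8_dlc when CAN_CTRLMODE_CC_LEN8_DLC is enabled.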
+ */ +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc) +{ + struct canfd_frame *cfd; + struct can_frame *ccf; + struct sk_buff *skb; + u8 len; + bool is_can_fd = !!(es58x_flags & ES58X_FLAG_FD_DATA); + + if (dlc > CAN_MAX_RAW_DLC) { + netdev_err(netdev, + "%s: DLC is %d but maximum should be %d\n", + __func__, dlc, CAN_MAX_RAW_DLC); + return -EMSGSIZE; + } + + if (is_can_fd) { + len = can_fd_dlc2len(dlc); + skb = alloc_canfd_skb(netdev, &cfd); + } else { + len = can_cc_dlc2len(dlc); + skb = alloc_can_skb(netdev, &ccf); + cfd = (struct canfd_frame *)ccf; + } + if (!skb) { + netdev->stats.rx_dropped++; + return 0; + } + + cfd->can_id = can_id; + if (es58x_flags & ES58X_FLAG_EFF) + cfd->can_id |= CAN_EFF_FLAG; + if (is_can_fd) { + cfd->len = len; + if (es58x_flags & ES58X_FLAG_FD_BRS) + cfd->flags |= CANFD_BRS; + if (es58x_flags & ES58X_FLAG_FD_ESI) + cfd->flags |= CANFD_ESI; + } else { + can_frame_set_cc_len(ccf, dlc, es58x_priv(netdev)->can.ctrlmode); + if (es58x_flags & ES58X_FLAG_RTR) { + ccf->can_id |= CAN_RTR_FLAG; + len = 0; + } + } + memcpy(cfd->data, data, len); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + + es58x_priv(netdev)->err_passive_before_rtx_success = 0; + + return 0; +} + +/** + * es58x_rx_err_msg() - Handle a received CAN event or error message. + * @netdev: CAN network device. + * @error: Error code. + * @event: Event code. + * @timestamp: Timestamp received from a ES58X device. + * + * Handle the errors and events received by the ES58X device, create + * a CAN error skb and post it. + * + * In some rare cases the devices might get stuck alternating between + * CAN_STATE_ERROR_PASSIVE and CAN_STATE_ERROR_WARNING. To prevent + * this behavior, we force a bus off state if the device goes in + * CAN_STATE_ERROR_WARNING for ES58X_MAX_CONSECUTIVE_WARN consecutive + * times with no successful transmission or reception in between. + * + * Once the device is in bus off state, the only way to restart it is + * through the drivers/net/can/dev.c:can_restart() function. The + * device is technically capable to recover by itself under certain + * circumstances, however, allowing self recovery would create + * complex race conditions with drivers/net/can/dev.c:can_restart() + * and thus was not implemented. To activate automatic restart, please + * set the restart-ms parameter (e.g. ip link set can0 type can + * restart-ms 100). + * + * If the bus is really instable, this function would try to send a + * lot of log messages. Those are rate limited (i.e. you will see + * messages such as "net_ratelimit: XXX callbacks suppressed" in + * dmesg). + * + * Return: zero on success, errno when any error occurs. 
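+ *
+ * The forced bus off described above is implemented as a recursive
+ * call with a synthesized event:
+ *
+ *	return es58x_rx_err_msg(netdev, ES58X_ERR_OK,
+ *				ES58X_EVENT_BUSOFF, timestamp);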
+ */ +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum es58x_event event, u64 timestamp) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct can_priv *can = netdev_priv(netdev); + struct can_device_stats *can_stats = &can->can_stats; + struct can_frame *cf = NULL; + struct sk_buff *skb; + int ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, "%s: %s is down, dropping packet\n", + __func__, netdev->name); + netdev->stats.rx_dropped++; + return 0; + } + + if (error == ES58X_ERR_OK && event == ES58X_EVENT_OK) { + netdev_err(netdev, "%s: Both error and event are zero\n", + __func__); + return -EINVAL; + } + + skb = alloc_can_err_skb(netdev, &cf); + + switch (error) { + case ES58X_ERR_OK: /* 0: No error */ + break; + + case ES58X_ERR_PROT_STUFF: + if (net_ratelimit()) + netdev_dbg(netdev, "Error BITSTUFF\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + + case ES58X_ERR_PROT_FORM: + if (net_ratelimit()) + netdev_dbg(netdev, "Error FORMAT\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + + case ES58X_ERR_ACK: + if (net_ratelimit()) + netdev_dbg(netdev, "Error ACK\n"); + if (cf) + cf->can_id |= CAN_ERR_ACK; + break; + + case ES58X_ERR_PROT_BIT: + if (net_ratelimit()) + netdev_dbg(netdev, "Error BIT\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + + case ES58X_ERR_PROT_CRC: + if (net_ratelimit()) + netdev_dbg(netdev, "Error CRC\n"); + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + break; + + case ES58X_ERR_PROT_BIT1: + if (net_ratelimit()) + netdev_dbg(netdev, + "Error: expected a recessive bit but monitored a dominant one\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + + case ES58X_ERR_PROT_BIT0: + if (net_ratelimit()) + netdev_dbg(netdev, + "Error expected a dominant bit but monitored a recessive one\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + + case ES58X_ERR_PROT_OVERLOAD: + if (net_ratelimit()) + netdev_dbg(netdev, "Error OVERLOAD\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + break; + + case ES58X_ERR_PROT_UNSPEC: + if (net_ratelimit()) + netdev_dbg(netdev, "Unspecified error\n"); + if (cf) + cf->can_id |= CAN_ERR_PROT; + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: Unspecified error code 0x%04X\n", + __func__, (int)error); + if (cf) + cf->can_id |= CAN_ERR_PROT; + break; + } + + switch (event) { + case ES58X_EVENT_OK: /* 0: No event */ + break; + + case ES58X_EVENT_CRTL_ACTIVE: + if (can->state == CAN_STATE_BUS_OFF) { + netdev_err(netdev, + "%s: state transition: BUS OFF -> ACTIVE\n", + __func__); + } + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS ACTIVE\n"); + if (cf) + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; + can->state = CAN_STATE_ERROR_ACTIVE; + break; + + case ES58X_EVENT_CRTL_PASSIVE: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS PASSIVE\n"); + /* Either TX or RX error count reached passive state + * but we do not know which. Setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + } + if (can->state < CAN_STATE_BUS_OFF) + can->state = CAN_STATE_ERROR_PASSIVE; + can_stats->error_passive++; + if (priv->err_passive_before_rtx_success < U8_MAX) + priv->err_passive_before_rtx_success++; + break; + + case ES58X_EVENT_CRTL_WARNING: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS WARNING\n"); + /* Either TX or RX error count reached warning state + * but we do not know which. 
Setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; + cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; + } + if (can->state < CAN_STATE_BUS_OFF) + can->state = CAN_STATE_ERROR_WARNING; + can_stats->error_warning++; + break; + + case ES58X_EVENT_BUSOFF: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS OFF\n"); + if (cf) + cf->can_id |= CAN_ERR_BUSOFF; + can_stats->bus_off++; + netif_stop_queue(netdev); + if (can->state != CAN_STATE_BUS_OFF) { + can->state = CAN_STATE_BUS_OFF; + can_bus_off(netdev); + ret = can->do_set_mode(netdev, CAN_MODE_STOP); + if (ret) + return ret; + } + break; + + case ES58X_EVENT_SINGLE_WIRE: + if (net_ratelimit()) + netdev_warn(netdev, + "Lost connection on either CAN high or CAN low\n"); + /* Lost connection on either CAN high or CAN + * low. Setting both flags by default. + */ + if (cf) { + cf->data[4] |= CAN_ERR_TRX_CANH_NO_WIRE; + cf->data[4] |= CAN_ERR_TRX_CANL_NO_WIRE; + } + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: Unspecified event code 0x%04X\n", + __func__, (int)event); + if (cf) + cf->can_id |= CAN_ERR_CRTL; + break; + } + + /* driver/net/can/dev.c:can_restart() takes in account error + * messages in the RX stats. Doing the same here for + * consistency. + */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += CAN_ERR_DLC; + + if (cf) { + if (cf->data[1]) + cf->can_id |= CAN_ERR_CRTL; + if (cf->data[2] || cf->data[3]) { + cf->can_id |= CAN_ERR_PROT; + can_stats->bus_error++; + } + if (cf->data[4]) + cf->can_id |= CAN_ERR_TRX; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + } + + if ((event & ES58X_EVENT_CRTL_PASSIVE) && + priv->err_passive_before_rtx_success == ES58X_CONSECUTIVE_ERR_PASSIVE_MAX) { + netdev_info(netdev, + "Got %d consecutive warning events with no successful RX or TX. Forcing bus-off\n", + priv->err_passive_before_rtx_success); + return es58x_rx_err_msg(netdev, ES58X_ERR_OK, + ES58X_EVENT_BUSOFF, timestamp); + } + + return 0; +} + +/** + * es58x_cmd_ret_desc() - Convert a command type to a string. + * @cmd_ret_type: Type of the command which triggered the return code. + * + * The final line (return "<unknown>") should not be reached. If this + * is the case, there is an implementation bug. + * + * Return: a readable description of the @cmd_ret_type. + */ +static const char *es58x_cmd_ret_desc(enum es58x_ret_type cmd_ret_type) +{ + switch (cmd_ret_type) { + case ES58X_RET_TYPE_SET_BITTIMING: + return "Set bittiming"; + case ES58X_RET_TYPE_ENABLE_CHANNEL: + return "Enable channel"; + case ES58X_RET_TYPE_DISABLE_CHANNEL: + return "Disable channel"; + case ES58X_RET_TYPE_TX_MSG: + return "Transmit message"; + case ES58X_RET_TYPE_RESET_RX: + return "Reset RX"; + case ES58X_RET_TYPE_RESET_TX: + return "Reset TX"; + case ES58X_RET_TYPE_DEVICE_ERR: + return "Device error"; + } + + return "<unknown>"; +}; + +/** + * es58x_rx_cmd_ret_u8() - Handle the command's return code received + * from the ES58X device. + * @dev: Device, only used for the dev_XXX() print functions. + * @cmd_ret_type: Type of the command which triggered the return code. + * @rx_cmd_ret_u8: Command error code as returned by the ES58X device. + * + * Handles the 8 bits command return code. Those are specific to the + * ES581.4 device. The return value will eventually be used by + * es58x_handle_urb_cmd() function which will take proper actions in + * case of critical issues such and memory errors or bad CRC values. 
+ * + * In contrast with es58x_rx_cmd_ret_u32(), the network device is + * unknown. + * + * Return: zero on success, return errno when any error occurs. + */ +int es58x_rx_cmd_ret_u8(struct device *dev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8) +{ + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u8) { + case ES58X_RET_U8_OK: + dev_dbg_ratelimited(dev, "%s: OK\n", ret_desc); + return 0; + + case ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE: + dev_err(dev, "%s: unspecified failure\n", ret_desc); + return -EBADMSG; + + case ES58X_RET_U8_ERR_NO_MEM: + dev_err(dev, "%s: device ran out of memory\n", ret_desc); + return -ENOMEM; + + case ES58X_RET_U8_ERR_BAD_CRC: + dev_err(dev, "%s: CRC of previous command is incorrect\n", + ret_desc); + return -EIO; + + default: + dev_err(dev, "%s: returned unknown value: 0x%02X\n", + ret_desc, rx_cmd_ret_u8); + return -EBADMSG; + } +} + +/** + * es58x_rx_cmd_ret_u32() - Handle the command return code received + * from the ES58X device. + * @netdev: CAN network device. + * @cmd_ret_type: Type of the command which triggered the return code. + * @rx_cmd_ret_u32: error code as returned by the ES58X device. + * + * Handles the 32 bits command return code. The return value will + * eventually be used by es58x_handle_urb_cmd() function which will + * take proper actions in case of critical issues such and memory + * errors or bad CRC values. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_operators *ops = priv->es58x_dev->ops; + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u32) { + case ES58X_RET_U32_OK: + switch (cmd_ret_type) { + case ES58X_RET_TYPE_ENABLE_CHANNEL: + es58x_can_reset_echo_fifo(netdev); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(netdev); + netdev_info(netdev, + "%s: %s (Serial Number %s): CAN%d channel becomes ready\n", + ret_desc, priv->es58x_dev->udev->product, + priv->es58x_dev->udev->serial, + priv->channel_idx + 1); + break; + + case ES58X_RET_TYPE_TX_MSG: + if (IS_ENABLED(CONFIG_VERBOSE_DEBUG) && net_ratelimit()) + netdev_vdbg(netdev, "%s: OK\n", ret_desc); + break; + + default: + netdev_dbg(netdev, "%s: OK\n", ret_desc); + break; + } + return 0; + + case ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE: + if (cmd_ret_type == ES58X_RET_TYPE_ENABLE_CHANNEL) { + int ret; + + netdev_warn(netdev, + "%s: channel is already opened, closing and re-opening it to reflect new configuration\n", + ret_desc); + ret = ops->disable_channel(es58x_priv(netdev)); + if (ret) + return ret; + return ops->enable_channel(es58x_priv(netdev)); + } + if (cmd_ret_type == ES58X_RET_TYPE_DISABLE_CHANNEL) { + netdev_info(netdev, + "%s: channel is already closed\n", ret_desc); + return 0; + } + netdev_err(netdev, + "%s: unspecified failure\n", ret_desc); + return -EBADMSG; + + case ES58X_RET_U32_ERR_NO_MEM: + netdev_err(netdev, "%s: device ran out of memory\n", ret_desc); + return -ENOMEM; + + case ES58X_RET_U32_WARN_PARAM_ADJUSTED: + netdev_warn(netdev, + "%s: some incompatible parameters have been adjusted\n", + ret_desc); + return 0; + + case ES58X_RET_U32_WARN_TX_MAYBE_REORDER: + netdev_warn(netdev, + "%s: TX messages might have been reordered\n", + ret_desc); + return 0; + + case ES58X_RET_U32_ERR_TIMEDOUT: + netdev_err(netdev, "%s: command timed out\n", 
ret_desc); + return -ETIMEDOUT; + + case ES58X_RET_U32_ERR_FIFO_FULL: + netdev_warn(netdev, "%s: fifo is full\n", ret_desc); + return 0; + + case ES58X_RET_U32_ERR_BAD_CONFIG: + netdev_err(netdev, "%s: bad configuration\n", ret_desc); + return -EINVAL; + + case ES58X_RET_U32_ERR_NO_RESOURCE: + netdev_err(netdev, "%s: no resource available\n", ret_desc); + return -EBUSY; + + default: + netdev_err(netdev, "%s returned unknown value: 0x%08X\n", + ret_desc, rx_cmd_ret_u32); + return -EBADMSG; + } +} + +/** + * es58x_increment_rx_errors() - Increment the network devices' error + * count. + * @es58x_dev: ES58X device. + * + * If an error occurs on the early stages on receiving an URB command, + * we might not be able to figure out on which network device the + * error occurred. In such case, we arbitrarily increment the error + * count of all the network devices attached to our ES58X device. + */ +static void es58x_increment_rx_errors(struct es58x_device *es58x_dev) +{ + int i; + + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + es58x_dev->netdev[i]->stats.rx_errors++; +} + +/** + * es58x_handle_urb_cmd() - Handle the URB command + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command received from the ES58X device, might not + * be aligned. + * + * Sends the URB command to the device specific function. Manages the + * errors thrown back by those functions. + */ +static void es58x_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_operators *ops = es58x_dev->ops; + size_t cmd_len; + int i, ret; + + ret = ops->handle_urb_cmd(es58x_dev, urb_cmd); + switch (ret) { + case 0: /* OK */ + return; + + case -ENODEV: + dev_err_ratelimited(es58x_dev->dev, "Device is not ready\n"); + break; + + case -EINVAL: + case -EMSGSIZE: + case -EBADRQC: + case -EBADMSG: + case -ECHRNG: + case -ETIMEDOUT: + cmd_len = es58x_get_urb_cmd_len(es58x_dev, + ops->get_msg_len(urb_cmd)); + dev_err(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe", + ERR_PTR(ret)); + es58x_print_hex_dump(urb_cmd, cmd_len); + break; + + case -EFAULT: + case -ENOMEM: + case -EIO: + default: + dev_crit(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe, detaching all network devices\n", + ERR_PTR(ret)); + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + if (es58x_dev->ops->reset_device) + es58x_dev->ops->reset_device(es58x_dev); + break; + } + + /* Because the urb command could not fully be parsed, + * channel_id is not confirmed. Incrementing rx_errors count + * of all channels. + */ + es58x_increment_rx_errors(es58x_dev); +} + +/** + * es58x_check_rx_urb() - Check the length and format of the URB command. + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command received from the ES58X device, might not + * be aligned. + * @urb_actual_len: The actual length of the URB command. + * + * Check if the first message of the received urb is valid, that is to + * say that both the header and the length are coherent. + * + * Return: + * the length of the first message of the URB on success. + * + * -ENODATA if the URB command is incomplete (in which case, the URB + * command should be buffered and combined with the next URB to try to + * reconstitute the URB command). + * + * -EOVERFLOW if the length is bigger than the maximum expected one. + * + * -EBADRQC if the start of frame does not match the expected value. 
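+ *
+ * Callers dispatch on the sign of the return value; es58x_split_urb()
+ * does roughly:
+ *
+ *	ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len);
+ *	if (ret > 0)
+ *		es58x_handle_urb_cmd(es58x_dev, urb_cmd);
+ *	else if (ret == -ENODATA)
+ *		es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len);
+ *	else
+ *		es58x_split_urb_try_recovery(es58x_dev, raw_cmd, raw_cmd_len);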
+ */ +static signed int es58x_check_rx_urb(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, + u32 urb_actual_len) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + u16 sof, msg_len; + signed int urb_cmd_len, ret; + + if (urb_actual_len < param->urb_cmd_header_len) { + dev_vdbg(dev, + "%s: Received %d bytes [%*ph]: header incomplete\n", + __func__, urb_actual_len, urb_actual_len, + urb_cmd->raw_cmd); + return -ENODATA; + } + + sof = get_unaligned_le16(&urb_cmd->sof); + if (sof != param->rx_start_of_frame) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Expected sequence 0x%04X for start of frame but got 0x%04X.\n", + __func__, param->rx_start_of_frame, sof); + return -EBADRQC; + } + + msg_len = es58x_dev->ops->get_msg_len(urb_cmd); + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > param->rx_urb_cmd_max_len) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Biggest expected size for rx urb_cmd is %u but receive a command of size %d\n", + __func__, + param->rx_urb_cmd_max_len, urb_cmd_len); + return -EOVERFLOW; + } else if (urb_actual_len < urb_cmd_len) { + dev_vdbg(dev, "%s: Received %02d/%02d bytes\n", + __func__, urb_actual_len, urb_cmd_len); + return -ENODATA; + } + + ret = es58x_check_crc(es58x_dev, urb_cmd, urb_cmd_len); + if (ret) + return ret; + + return urb_cmd_len; +} + +/** + * es58x_copy_to_cmd_buf() - Copy an array to the URB command buffer. + * @es58x_dev: ES58X device. + * @raw_cmd: the buffer we want to copy. + * @raw_cmd_len: length of @raw_cmd. + * + * Concatenates @raw_cmd_len bytes of @raw_cmd to the end of the URB + * command buffer. + * + * Return: zero on success, -EMSGSIZE if not enough space is available + * to do the copy. + */ +static int es58x_copy_to_cmd_buf(struct es58x_device *es58x_dev, + u8 *raw_cmd, int raw_cmd_len) +{ + if (es58x_dev->rx_cmd_buf_len + raw_cmd_len > + es58x_dev->param->rx_urb_cmd_max_len) + return -EMSGSIZE; + + memcpy(&es58x_dev->rx_cmd_buf.raw_cmd[es58x_dev->rx_cmd_buf_len], + raw_cmd, raw_cmd_len); + es58x_dev->rx_cmd_buf_len += raw_cmd_len; + + return 0; +} + +/** + * es58x_split_urb_try_recovery() - Try to recover bad URB sequences. + * @es58x_dev: ES58X device. + * @raw_cmd: pointer to the buffer we want to copy. + * @raw_cmd_len: length of @raw_cmd. + * + * Under some rare conditions, we might get incorrect URBs from the + * device. From our observations, one of the valid URB gets replaced + * by one from the past. The full root cause is not identified. + * + * This function looks for the next start of frame in the urb buffer + * in order to try to recover. + * + * Such behavior was not observed on the devices of the ES58X FD + * family and only seems to impact the ES581.4. + * + * Return: the number of bytes dropped on success, -EBADMSG if recovery failed. + */ +static int es58x_split_urb_try_recovery(struct es58x_device *es58x_dev, + u8 *raw_cmd, size_t raw_cmd_len) +{ + union es58x_urb_cmd *urb_cmd; + signed int urb_cmd_len; + u16 sof; + int dropped_bytes = 0; + + es58x_increment_rx_errors(es58x_dev); + + while (raw_cmd_len > sizeof(sof)) { + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + sof = get_unaligned_le16(&urb_cmd->sof); + + if (sof == es58x_dev->param->rx_start_of_frame) { + urb_cmd_len = es58x_check_rx_urb(es58x_dev, + urb_cmd, raw_cmd_len); + if ((urb_cmd_len == -ENODATA) || urb_cmd_len > 0) { + dev_info_ratelimited(es58x_dev->dev, + "Recovery successful! 
Dropped %d bytes (urb_cmd_len: %d)\n", + dropped_bytes, + urb_cmd_len); + return dropped_bytes; + } + } + raw_cmd++; + raw_cmd_len--; + dropped_bytes++; + } + + dev_warn_ratelimited(es58x_dev->dev, "%s: Recovery failed\n", __func__); + return -EBADMSG; +} + +/** + * es58x_handle_incomplete_cmd() - Reconstitute an URB command from + * different URB pieces. + * @es58x_dev: ES58X device. + * @urb: last urb buffer received. + * + * The device might split the URB commands in an arbitrary amount of + * pieces. This function concatenates those in an URB buffer until a + * full URB command is reconstituted and consume it. + * + * Return: + * number of bytes consumed from @urb if successful. + * + * -ENODATA if the URB command is still incomplete. + * + * -EBADMSG if the URB command is incorrect. + */ +static signed int es58x_handle_incomplete_cmd(struct es58x_device *es58x_dev, + struct urb *urb) +{ + size_t cpy_len; + signed int urb_cmd_len, tmp_cmd_buf_len, ret; + + tmp_cmd_buf_len = es58x_dev->rx_cmd_buf_len; + cpy_len = min_t(int, es58x_dev->param->rx_urb_cmd_max_len - + es58x_dev->rx_cmd_buf_len, urb->actual_length); + ret = es58x_copy_to_cmd_buf(es58x_dev, urb->transfer_buffer, cpy_len); + if (ret < 0) + return ret; + + urb_cmd_len = es58x_check_rx_urb(es58x_dev, &es58x_dev->rx_cmd_buf, + es58x_dev->rx_cmd_buf_len); + if (urb_cmd_len == -ENODATA) { + return -ENODATA; + } else if (urb_cmd_len < 0) { + dev_err_ratelimited(es58x_dev->dev, + "Could not reconstitute incomplete command from previous URB, dropping %d bytes\n", + tmp_cmd_buf_len + urb->actual_length); + dev_err_ratelimited(es58x_dev->dev, + "Error code: %pe, es58x_dev->rx_cmd_buf_len: %d, urb->actual_length: %u\n", + ERR_PTR(urb_cmd_len), + tmp_cmd_buf_len, urb->actual_length); + es58x_print_hex_dump(&es58x_dev->rx_cmd_buf, tmp_cmd_buf_len); + es58x_print_hex_dump(urb->transfer_buffer, urb->actual_length); + return urb->actual_length; + } + + es58x_handle_urb_cmd(es58x_dev, &es58x_dev->rx_cmd_buf); + return urb_cmd_len - tmp_cmd_buf_len; /* consumed length */ +} + +/** + * es58x_split_urb() - Cut the received URB in individual URB commands. + * @es58x_dev: ES58X device. + * @urb: last urb buffer received. + * + * The device might send urb in bulk format (i.e. several URB commands + * concatenated together). This function will split all the commands + * contained in the urb. + * + * Return: + * number of bytes consumed from @urb if successful. + * + * -ENODATA if the URB command is incomplete. + * + * -EBADMSG if the URB command is incorrect. 
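+ *
+ * This is the top of the RX parsing path:
+ *
+ *	es58x_read_bulk_callback()
+ *	  -> es58x_split_urb()
+ *	       -> es58x_handle_incomplete_cmd()	(leftover from previous URB)
+ *	       -> es58x_check_rx_urb() then es58x_handle_urb_cmd()
+ *	       -> es58x_split_urb_try_recovery()	(corrupted sequence)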
+ */ +static signed int es58x_split_urb(struct es58x_device *es58x_dev, + struct urb *urb) +{ + union es58x_urb_cmd *urb_cmd; + u8 *raw_cmd = urb->transfer_buffer; + s32 raw_cmd_len = urb->actual_length; + int ret; + + if (es58x_dev->rx_cmd_buf_len != 0) { + ret = es58x_handle_incomplete_cmd(es58x_dev, urb); + if (ret != -ENODATA) + es58x_dev->rx_cmd_buf_len = 0; + if (ret < 0) + return ret; + + raw_cmd += ret; + raw_cmd_len -= ret; + } + + while (raw_cmd_len > 0) { + if (raw_cmd[0] == ES58X_HEARTBEAT) { + raw_cmd++; + raw_cmd_len--; + continue; + } + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len); + if (ret > 0) { + es58x_handle_urb_cmd(es58x_dev, urb_cmd); + } else if (ret == -ENODATA) { + es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len); + return -ENODATA; + } else if (ret < 0) { + ret = es58x_split_urb_try_recovery(es58x_dev, raw_cmd, + raw_cmd_len); + if (ret < 0) + return ret; + } + raw_cmd += ret; + raw_cmd_len -= ret; + } + + return 0; +} + +/** + * es58x_read_bulk_callback() - Callback for reading data from device. + * @urb: last urb buffer received. + * + * This function gets eventually called each time an URB is received + * from the ES58X device. + * + * Checks urb status, calls read function and resubmits urb read + * operation. + */ +static void es58x_read_bulk_callback(struct urb *urb) +{ + struct es58x_device *es58x_dev = urb->context; + const struct device *dev = es58x_dev->dev; + int i, ret; + + switch (urb->status) { + case 0: /* success */ + break; + + case -EOVERFLOW: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->transfer_buffer_length); + goto resubmit_urb; + + case -EPROTO: + dev_warn_ratelimited(dev, "%s: error %pe. Device unplugged?\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + case -ENOENT: + case -EPIPE: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + case -ESHUTDOWN: + dev_dbg_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + default: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto resubmit_urb; + } + + ret = es58x_split_urb(es58x_dev, urb); + if ((ret != -ENODATA) && ret < 0) { + dev_err(es58x_dev->dev, "es58x_split_urb() returned error %pe", + ERR_PTR(ret)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->actual_length); + + /* Because the urb command could not be parsed, + * channel_id is not confirmed. Incrementing rx_errors + * count of all channels. + */ + es58x_increment_rx_errors(es58x_dev); + } + + resubmit_urb: + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + urb->transfer_buffer, urb->transfer_buffer_length, + es58x_read_bulk_callback, es58x_dev); + + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) { + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + } else if (ret) + dev_err_ratelimited(dev, + "Failed resubmitting read bulk urb: %pe\n", + ERR_PTR(ret)); + return; + + free_urb: + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); +} + +/** + * es58x_write_bulk_callback() - Callback after writing data to the device. + * @urb: urb buffer which was previously submitted. + * + * This function gets eventually called each time an URB was sent to + * the ES58X device. 
+ * + * Puts the @urb back to the urbs idle anchor and tries to restart the + * network queue. + */ +static void es58x_write_bulk_callback(struct urb *urb) +{ + struct net_device *netdev = urb->context; + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + + switch (urb->status) { + case 0: /* success */ + break; + + case -EOVERFLOW: + if (net_ratelimit()) + netdev_err(netdev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + es58x_print_hex_dump(urb->transfer_buffer, + urb->transfer_buffer_length); + break; + + case -ENOENT: + if (net_ratelimit()) + netdev_dbg(netdev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + return; + + default: + if (net_ratelimit()) + netdev_info(netdev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + break; + } + + usb_anchor_urb(urb, &es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); +} + +/** + * es58x_alloc_urb() - Allocate memory for an URB and its transfer + * buffer. + * @es58x_dev: ES58X device. + * @urb: URB to be allocated. + * @buf: used to return DMA address of buffer. + * @buf_len: requested buffer size. + * @mem_flags: affect whether allocation may block. + * + * Allocates an URB and its @transfer_buffer and set its @transfer_dma + * address. + * + * This function is used at start-up to allocate all RX URBs at once + * and during run time for TX URBs. + * + * Return: zero on success, -ENOMEM if no memory is available. + */ +static int es58x_alloc_urb(struct es58x_device *es58x_dev, struct urb **urb, + u8 **buf, size_t buf_len, gfp_t mem_flags) +{ + *urb = usb_alloc_urb(0, mem_flags); + if (!*urb) { + dev_err(es58x_dev->dev, "No memory left for URBs\n"); + return -ENOMEM; + } + + *buf = usb_alloc_coherent(es58x_dev->udev, buf_len, + mem_flags, &(*urb)->transfer_dma); + if (!*buf) { + dev_err(es58x_dev->dev, "No memory left for USB buffer\n"); + usb_free_urb(*urb); + return -ENOMEM; + } + + (*urb)->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return 0; +} + +/** + * es58x_get_tx_urb() - Get an URB for transmission. + * @es58x_dev: ES58X device. + * + * Gets an URB from the idle urbs anchor or allocate a new one if the + * anchor is empty. + * + * If there are more than ES58X_TX_URBS_MAX in the idle anchor, do + * some garbage collection. The garbage collection is done here + * instead of within es58x_write_bulk_callback() because + * usb_free_coherent() should not be used in IRQ context: + * c.f. WARN_ON(irqs_disabled()) in dma_free_attrs(). + * + * Return: a pointer to an URB on success, NULL if no memory is + * available. 
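+ *
+ * Typical usage, as in es58x_send_msg() and es58x_start_xmit():
+ *
+ *	urb = es58x_get_tx_urb(es58x_dev);
+ *	if (!urb)
+ *		return -ENOMEM;
+ *	... fill urb->transfer_buffer and set transfer_buffer_length ...
+ *	ret = es58x_submit_urb(es58x_dev, urb, netdev);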
+ */ +static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev) +{ + atomic_t *idle_cnt = &es58x_dev->tx_urbs_idle_cnt; + struct urb *urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!urb) { + size_t tx_buf_len; + u8 *buf; + + tx_buf_len = es58x_dev->param->tx_urb_cmd_max_len; + if (es58x_alloc_urb(es58x_dev, &urb, &buf, tx_buf_len, + GFP_ATOMIC)) + return NULL; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + buf, tx_buf_len, NULL, NULL); + return urb; + } + + while (atomic_dec_return(idle_cnt) > ES58X_TX_URBS_MAX) { + /* Garbage collector */ + struct urb *tmp = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!tmp) + break; + usb_free_coherent(tmp->dev, + es58x_dev->param->tx_urb_cmd_max_len, + tmp->transfer_buffer, tmp->transfer_dma); + usb_free_urb(tmp); + } + + return urb; +} + +/** + * es58x_submit_urb() - Send data to the device. + * @es58x_dev: ES58X device. + * @urb: URB to be sent. + * @netdev: CAN network device. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb, + struct net_device *netdev) +{ + int ret; + + es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length); + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + urb->transfer_buffer, urb->transfer_buffer_length, + es58x_write_bulk_callback, netdev); + usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + netdev_err(netdev, "%s: USB send urb failure: %pe\n", + __func__, ERR_PTR(ret)); + usb_unanchor_urb(urb); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + } + usb_free_urb(urb); + + return ret; +} + +/** + * es58x_send_msg() - Prepare an URB and submit it. + * @es58x_dev: ES58X device. + * @cmd_type: Command type. + * @cmd_id: Command ID. + * @msg: ES58X message to be sent. + * @msg_len: Length of @msg. + * @channel_idx: Index of the network device. + * + * Creates an URB command from a given message, sets the header and the + * CRC and then submits it. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 msg_len, int channel_idx) +{ + struct net_device *netdev; + union es58x_urb_cmd *urb_cmd; + struct urb *urb; + int urb_cmd_len; + + if (channel_idx == ES58X_CHANNEL_IDX_NA) + netdev = es58x_dev->netdev[0]; /* Default to first channel */ + else + netdev = es58x_dev->netdev[channel_idx]; + + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > es58x_dev->param->tx_urb_cmd_max_len) + return -EOVERFLOW; + + urb = es58x_get_tx_urb(es58x_dev); + if (!urb) + return -ENOMEM; + + urb_cmd = urb->transfer_buffer; + es58x_dev->ops->fill_urb_header(urb_cmd, cmd_type, cmd_id, + channel_idx, msg_len); + memcpy(&urb_cmd->raw_cmd[es58x_dev->param->urb_cmd_header_len], + msg, msg_len); + urb->transfer_buffer_length = urb_cmd_len; + + return es58x_submit_urb(es58x_dev, urb, netdev); +} + +/** + * es58x_alloc_rx_urbs() - Allocate RX URBs. + * @es58x_dev: ES58X device. + * + * Allocate URBs for reception and anchor them. + * + * Return: zero on success, errno when any error occurs. 
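+ *
+ * Only the first opened channel allocates them; es58x_open() does:
+ *
+ *	if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1)
+ *		ret = es58x_alloc_rx_urbs(es58x_dev);
+ *
+ * and the last channel closed releases them via es58x_free_urbs().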
+ */ +static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + size_t rx_buf_len = es58x_dev->rx_max_packet_size; + struct urb *urb; + u8 *buf; + int i; + int ret = -EINVAL; + + for (i = 0; i < param->rx_urb_max; i++) { + ret = es58x_alloc_urb(es58x_dev, &urb, &buf, rx_buf_len, + GFP_KERNEL); + if (ret) + break; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + buf, rx_buf_len, es58x_read_bulk_callback, + es58x_dev); + usb_anchor_urb(urb, &es58x_dev->rx_urbs); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); + usb_free_coherent(es58x_dev->udev, rx_buf_len, + buf, urb->transfer_dma); + usb_free_urb(urb); + break; + } + usb_free_urb(urb); + } + + if (i == 0) { + dev_err(dev, "%s: Could not setup any rx URBs\n", __func__); + return ret; + } + dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n", + __func__, i, rx_buf_len); + + return ret; +} + +/** + * es58x_free_urbs() - Free all the TX and RX URBs. + * @es58x_dev: ES58X device. + */ +static void es58x_free_urbs(struct es58x_device *es58x_dev) +{ + struct urb *urb; + + if (!usb_wait_anchor_empty_timeout(&es58x_dev->tx_urbs_busy, 1000)) { + dev_err(es58x_dev->dev, "%s: Timeout, some TX urbs still remain\n", + __func__); + usb_kill_anchored_urbs(&es58x_dev->tx_urbs_busy); + } + + while ((urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle)) != NULL) { + usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + atomic_dec(&es58x_dev->tx_urbs_idle_cnt); + } + if (atomic_read(&es58x_dev->tx_urbs_idle_cnt)) + dev_err(es58x_dev->dev, + "All idle urbs were freed but tx_urb_idle_cnt is %d\n", + atomic_read(&es58x_dev->tx_urbs_idle_cnt)); + + usb_kill_anchored_urbs(&es58x_dev->rx_urbs); +} + +/** + * es58x_open() - Enable the network device. + * @netdev: CAN network device. + * + * Called when the network transitions to the up state. Allocate the + * URB resources if needed and open the channel. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_open(struct net_device *netdev) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + int ret; + + if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) { + ret = es58x_alloc_rx_urbs(es58x_dev); + if (ret) + return ret; + + ret = es58x_set_realtime_diff_ns(es58x_dev); + if (ret) + goto free_urbs; + } + + ret = open_candev(netdev); + if (ret) + goto free_urbs; + + ret = es58x_dev->ops->enable_channel(es58x_priv(netdev)); + if (ret) + goto free_urbs; + + netif_start_queue(netdev); + + return ret; + + free_urbs: + if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_free_urbs(es58x_dev); + netdev_err(netdev, "%s: Could not open the network device: %pe\n", + __func__, ERR_PTR(ret)); + + return ret; +} + +/** + * es58x_stop() - Disable the network device. + * @netdev: CAN network device. + * + * Called when the network transitions to the down state. If all the + * channels of the device are closed, free the URB resources which are + * not needed anymore. + * + * Return: zero on success, errno when any error occurs. 
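+ *
+ * Together with es58x_open(), this implements the usual up/down cycle
+ * driven from user space, e.g. (the bitrate value is only an example):
+ *
+ *	ip link set can0 up type can bitrate 500000
+ *	ip link set can0 down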
+ */ +static int es58x_stop(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + int ret; + + netif_stop_queue(netdev); + ret = es58x_dev->ops->disable_channel(priv); + if (ret) + return ret; + + priv->can.state = CAN_STATE_STOPPED; + es58x_can_reset_echo_fifo(netdev); + close_candev(netdev); + + es58x_flush_pending_tx_msg(netdev); + + if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_free_urbs(es58x_dev); + + return 0; +} + +/** + * es58x_xmit_commit() - Send the bulk urb. + * @netdev: CAN network device. + * + * Do the bulk send. This function should be called only once by bulk + * transmission. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_xmit_commit(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret; + + if (!es58x_is_can_state_active(netdev)) + return -ENETDOWN; + + if (es58x_is_echo_skb_threshold_reached(priv)) + netif_stop_queue(netdev); + + ret = es58x_submit_urb(priv->es58x_dev, priv->tx_urb, netdev); + if (ret == 0) + priv->tx_urb = NULL; + + return ret; +} + +/** + * es58x_xmit_more() - Can we put more packets? + * @priv: ES58X private parameters related to the network device. + * + * Return: true if we can put more, false if it is time to send. + */ +static bool es58x_xmit_more(struct es58x_priv *priv) +{ + unsigned int free_slots = + priv->can.echo_skb_max - (priv->tx_head - priv->tx_tail); + + return netdev_xmit_more() && free_slots > 0 && + priv->tx_can_msg_cnt < priv->es58x_dev->param->tx_bulk_max; +} + +/** + * es58x_start_xmit() - Transmit an skb. + * @skb: socket buffer of a CAN message. + * @netdev: CAN network device. + * + * Called when a packet needs to be transmitted. + * + * This function relies on Byte Queue Limits (BQL). The main benefit + * is to increase the throughput by allowing bulk transfers + * (c.f. xmit_more flag). + * + * Queues up to tx_bulk_max messages in &tx_urb buffer and does + * a bulk send of all messages in one single URB. + * + * Return: NETDEV_TX_OK regardless of if we could transmit the @skb or + * had to drop it. + */ +static netdev_tx_t es58x_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + unsigned int frame_len; + int ret; + + if (can_dropped_invalid_skb(netdev, skb)) { + if (priv->tx_urb) + goto xmit_commit; + return NETDEV_TX_OK; + } + + if (priv->tx_urb && priv->tx_can_msg_is_fd != can_is_canfd_skb(skb)) { + /* Can not do bulk send with mixed CAN and CAN FD frames. 
*/ + ret = es58x_xmit_commit(netdev); + if (ret) + goto drop_skb; + } + + if (!priv->tx_urb) { + priv->tx_urb = es58x_get_tx_urb(es58x_dev); + if (!priv->tx_urb) { + ret = -ENOMEM; + goto drop_skb; + } + priv->tx_can_msg_cnt = 0; + priv->tx_can_msg_is_fd = can_is_canfd_skb(skb); + } + + ret = es58x_dev->ops->tx_can_msg(priv, skb); + if (ret) + goto drop_skb; + + frame_len = can_skb_get_frame_len(skb); + ret = can_put_echo_skb(skb, netdev, + priv->tx_head & es58x_dev->param->fifo_mask, + frame_len); + if (ret) + goto xmit_failure; + netdev_sent_queue(netdev, frame_len); + + priv->tx_head++; + priv->tx_can_msg_cnt++; + + xmit_commit: + if (!es58x_xmit_more(priv)) { + ret = es58x_xmit_commit(netdev); + if (ret) + goto xmit_failure; + } + + return NETDEV_TX_OK; + + drop_skb: + dev_kfree_skb(skb); + netdev->stats.tx_dropped++; + xmit_failure: + netdev_warn(netdev, "%s: send message failure: %pe\n", + __func__, ERR_PTR(ret)); + netdev->stats.tx_errors++; + es58x_flush_pending_tx_msg(netdev); + return NETDEV_TX_OK; +} + +static const struct net_device_ops es58x_netdev_ops = { + .ndo_open = es58x_open, + .ndo_stop = es58x_stop, + .ndo_start_xmit = es58x_start_xmit +}; + +/** + * es58x_set_mode() - Change network device mode. + * @netdev: CAN network device. + * @mode: either %CAN_MODE_START, %CAN_MODE_STOP or %CAN_MODE_SLEEP + * + * Currently, this function is only used to stop and restart the + * channel during a bus off event (c.f. es58x_rx_err_msg() and + * drivers/net/can/dev.c:can_restart() which are the two only + * callers). + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_set_mode(struct net_device *netdev, enum can_mode mode) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + switch (mode) { + case CAN_MODE_START: + switch (priv->can.state) { + case CAN_STATE_BUS_OFF: + return priv->es58x_dev->ops->enable_channel(priv); + + case CAN_STATE_STOPPED: + return es58x_open(netdev); + + case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: + case CAN_STATE_ERROR_PASSIVE: + default: + return 0; + } + + case CAN_MODE_STOP: + switch (priv->can.state) { + case CAN_STATE_STOPPED: + return 0; + + case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: + case CAN_STATE_ERROR_PASSIVE: + case CAN_STATE_BUS_OFF: + default: + return priv->es58x_dev->ops->disable_channel(priv); + } + + case CAN_MODE_SLEEP: + default: + return -EOPNOTSUPP; + } +} + +/** + * es58x_init_priv() - Initialize private parameters. + * @es58x_dev: ES58X device. + * @priv: ES58X private parameters related to the network device. + * @channel_idx: Index of the network device. + */ +static void es58x_init_priv(struct es58x_device *es58x_dev, + struct es58x_priv *priv, int channel_idx) +{ + const struct es58x_parameters *param = es58x_dev->param; + struct can_priv *can = &priv->can; + + priv->es58x_dev = es58x_dev; + priv->channel_idx = channel_idx; + priv->tx_urb = NULL; + priv->tx_can_msg_cnt = 0; + + can->bittiming_const = param->bittiming_const; + if (param->ctrlmode_supported & CAN_CTRLMODE_FD) { + can->data_bittiming_const = param->data_bittiming_const; + can->tdc_const = param->tdc_const; + } + can->bitrate_max = param->bitrate_max; + can->clock = param->clock; + can->state = CAN_STATE_STOPPED; + can->ctrlmode_supported = param->ctrlmode_supported; + can->do_set_mode = es58x_set_mode; +} + +/** + * es58x_init_netdev() - Initialize the network device. + * @es58x_dev: ES58X device. + * @channel_idx: Index of the network device. 
+ * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx) +{ + struct net_device *netdev; + struct device *dev = es58x_dev->dev; + int ret; + + netdev = alloc_candev(sizeof(struct es58x_priv), + es58x_dev->param->fifo_mask + 1); + if (!netdev) { + dev_err(dev, "Could not allocate candev\n"); + return -ENOMEM; + } + SET_NETDEV_DEV(netdev, dev); + es58x_dev->netdev[channel_idx] = netdev; + es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx); + + netdev->netdev_ops = &es58x_netdev_ops; + netdev->flags |= IFF_ECHO; /* We support local echo */ + + ret = register_candev(netdev); + if (ret) + return ret; + + netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0), + es58x_dev->param->dql_min_limit); + + return ret; +} + +/** + * es58x_get_product_info() - Get the product information and print them. + * @es58x_dev: ES58X device. + * + * Do a synchronous call to get the product information. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_get_product_info(struct es58x_device *es58x_dev) +{ + struct usb_device *udev = es58x_dev->udev; + const int es58x_prod_info_idx = 6; + /* Empirical tests show a prod_info length of maximum 83, + * below should be more than enough. + */ + const size_t prod_info_len = 127; + char *prod_info; + int ret; + + prod_info = kmalloc(prod_info_len, GFP_KERNEL); + if (!prod_info) + return -ENOMEM; + + ret = usb_string(udev, es58x_prod_info_idx, prod_info, prod_info_len); + if (ret < 0) { + dev_err(es58x_dev->dev, + "%s: Could not read the product info: %pe\n", + __func__, ERR_PTR(ret)); + goto out_free; + } + if (ret >= prod_info_len - 1) { + dev_warn(es58x_dev->dev, + "%s: Buffer is too small, result might be truncated\n", + __func__); + } + dev_info(es58x_dev->dev, "Product info: %s\n", prod_info); + + out_free: + kfree(prod_info); + return ret < 0 ? ret : 0; +} + +/** + * es58x_init_es58x_dev() - Initialize the ES58X device. + * @intf: USB interface. + * @p_es58x_dev: pointer to the address of the ES58X device. + * @driver_info: Quirks of the device. + * + * Return: zero on success, errno when any error occurs. 
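+ *
+ * es58x_probe() below chains the initialization steps (error handling
+ * omitted here):
+ *
+ *	ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info);
+ *	ret = es58x_get_product_info(es58x_dev);
+ *	for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++)
+ *		ret = es58x_init_netdev(es58x_dev, ch_idx);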
+ */ +static int es58x_init_es58x_dev(struct usb_interface *intf, + struct es58x_device **p_es58x_dev, + kernel_ulong_t driver_info) +{ + struct device *dev = &intf->dev; + struct es58x_device *es58x_dev; + const struct es58x_parameters *param; + const struct es58x_operators *ops; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; + int ret; + + dev_info(dev, + "Starting %s %s (Serial Number %s) driver version %s\n", + udev->manufacturer, udev->product, udev->serial, DRV_VERSION); + + ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out, + NULL, NULL); + if (ret) + return ret; + + if (driver_info & ES58X_FD_FAMILY) { + param = &es58x_fd_param; + ops = &es58x_fd_ops; + } else { + param = &es581_4_param; + ops = &es581_4_ops; + } + + es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), GFP_KERNEL); + if (!es58x_dev) + return -ENOMEM; + + es58x_dev->param = param; + es58x_dev->ops = ops; + es58x_dev->dev = dev; + es58x_dev->udev = udev; + + if (driver_info & ES58X_DUAL_CHANNEL) + es58x_dev->num_can_ch = 2; + else + es58x_dev->num_can_ch = 1; + + init_usb_anchor(&es58x_dev->rx_urbs); + init_usb_anchor(&es58x_dev->tx_urbs_idle); + init_usb_anchor(&es58x_dev->tx_urbs_busy); + atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); + atomic_set(&es58x_dev->opened_channel_cnt, 0); + usb_set_intfdata(intf, es58x_dev); + + es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, + ep_in->bEndpointAddress); + es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev, + ep_out->bEndpointAddress); + es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize); + + *p_es58x_dev = es58x_dev; + + return 0; +} + +/** + * es58x_probe() - Initialize the USB device. + * @intf: USB interface. + * @id: USB device ID. + * + * Return: zero on success, -ENODEV if the interface is not supported + * or errno when any other error occurs. + */ +static int es58x_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct es58x_device *es58x_dev; + int ch_idx, ret; + + ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info); + if (ret) + return ret; + + ret = es58x_get_product_info(es58x_dev); + if (ret) + goto cleanup_es58x_dev; + + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { + ret = es58x_init_netdev(es58x_dev, ch_idx); + if (ret) + goto cleanup_candev; + } + + return ret; + + cleanup_candev: + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) + if (es58x_dev->netdev[ch_idx]) { + unregister_candev(es58x_dev->netdev[ch_idx]); + free_candev(es58x_dev->netdev[ch_idx]); + } + cleanup_es58x_dev: + kfree(es58x_dev); + + return ret; +} + +/** + * es58x_disconnect() - Disconnect the USB device. + * @intf: USB interface + * + * Called by the usb core when driver is unloaded or device is + * removed. 
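+ *
+ * Teardown happens in the reverse order of es58x_probe(): each
+ * channel is unregistered and freed, then the URBs, then the device
+ * structure itself:
+ *
+ *	unregister_candev(netdev);
+ *	free_candev(netdev);
+ *	es58x_free_urbs(es58x_dev);
+ *	kfree(es58x_dev);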
+ */ +static void es58x_disconnect(struct usb_interface *intf) +{ + struct es58x_device *es58x_dev = usb_get_intfdata(intf); + struct net_device *netdev; + int i; + + dev_info(&intf->dev, "Disconnecting %s %s\n", + es58x_dev->udev->manufacturer, es58x_dev->udev->product); + + for (i = 0; i < es58x_dev->num_can_ch; i++) { + netdev = es58x_dev->netdev[i]; + if (!netdev) + continue; + unregister_candev(netdev); + es58x_dev->netdev[i] = NULL; + free_candev(netdev); + } + + es58x_free_urbs(es58x_dev); + + kfree(es58x_dev); + usb_set_intfdata(intf, NULL); +} + +static struct usb_driver es58x_driver = { + .name = ES58X_MODULE_NAME, + .probe = es58x_probe, + .disconnect = es58x_disconnect, + .id_table = es58x_id_table +}; + +module_usb_driver(es58x_driver); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h new file mode 100644 index 000000000000..fcf219e727bf --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -0,0 +1,700 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_core.h: All common definitions and declarations. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES58X_COMMON_H__ +#define __ES58X_COMMON_H__ + +#include <linux/types.h> +#include <linux/usb.h> +#include <linux/netdevice.h> +#include <linux/can.h> +#include <linux/can/dev.h> + +#include "es581_4.h" +#include "es58x_fd.h" + +/* Driver constants */ +#define ES58X_RX_URBS_MAX 5 /* Empirical value */ +#define ES58X_TX_URBS_MAX 6 /* Empirical value */ + +#define ES58X_MAX(param) \ + (ES581_4_##param > ES58X_FD_##param ? \ + ES581_4_##param : ES58X_FD_##param) +#define ES58X_TX_BULK_MAX ES58X_MAX(TX_BULK_MAX) +#define ES58X_RX_BULK_MAX ES58X_MAX(RX_BULK_MAX) +#define ES58X_ECHO_BULK_MAX ES58X_MAX(ECHO_BULK_MAX) +#define ES58X_NUM_CAN_CH_MAX ES58X_MAX(NUM_CAN_CH) + +/* Use this when channel index is irrelevant (e.g. device + * timestamp). + */ +#define ES58X_CHANNEL_IDX_NA 0xFF +#define ES58X_EMPTY_MSG NULL + +/* Threshold on consecutive CAN_STATE_ERROR_PASSIVE. If we receive + * ES58X_CONSECUTIVE_ERR_PASSIVE_MAX times the event + * ES58X_ERR_CRTL_PASSIVE in a row without any successful RX or TX, + * we force the device to switch to CAN_STATE_BUS_OFF state. + */ +#define ES58X_CONSECUTIVE_ERR_PASSIVE_MAX 254 + +/* A magic number sent by the ES581.4 to inform it is alive. */ +#define ES58X_HEARTBEAT 0x11 + +/** + * enum es58x_driver_info - Quirks of the device. + * @ES58X_DUAL_CHANNEL: Device has two CAN channels. If this flag is + * not set, it is implied that the device has only one CAN + * channel. + * @ES58X_FD_FAMILY: Device is CAN-FD capable. If this flag is not + * set, the device only supports classical CAN. + */ +enum es58x_driver_info { + ES58X_DUAL_CHANNEL = BIT(0), + ES58X_FD_FAMILY = BIT(1) +}; + +enum es58x_echo { + ES58X_ECHO_OFF = 0, + ES58X_ECHO_ON = 1 +}; + +/** + * enum es58x_physical_layer - Type of the physical layer. + * @ES58X_PHYSICAL_LAYER_HIGH_SPEED: High-speed CAN (c.f. ISO + * 11898-2). + * + * Some products of the ETAS portfolio also support low-speed CAN + * (c.f. ISO 11898-3). However, all the devices in scope of this + * driver do not support the option, thus, the enum has only one + * member. 
+ */ +enum es58x_physical_layer { + ES58X_PHYSICAL_LAYER_HIGH_SPEED = 1 +}; + +enum es58x_samples_per_bit { + ES58X_SAMPLES_PER_BIT_ONE = 1, + ES58X_SAMPLES_PER_BIT_THREE = 2 +}; + +/** + * enum es58x_sync_edge - Synchronization method. + * @ES58X_SYNC_EDGE_SINGLE: ISO CAN specification defines the use of a + * single edge synchronization. The synchronization should be + * done on recessive to dominant level change. + * + * For information, ES582.1 and ES584.1 also support a double + * synchronization, requiring both recessive to dominant then dominant + * to recessive level change. However, this is not supported in + * SocketCAN framework, thus, the enum has only one member. + */ +enum es58x_sync_edge { + ES58X_SYNC_EDGE_SINGLE = 1 +}; + +/** + * enum es58x_flag - CAN flags for RX/TX messages. + * @ES58X_FLAG_EFF: Extended Frame Format (EFF). + * @ES58X_FLAG_RTR: Remote Transmission Request (RTR). + * @ES58X_FLAG_FD_BRS: Bit rate switch (BRS): second bitrate for + * payload data. + * @ES58X_FLAG_FD_ESI: Error State Indicator (ESI): tell if the + * transmitting node is in error passive mode. + * @ES58X_FLAG_FD_DATA: CAN FD frame. + */ +enum es58x_flag { + ES58X_FLAG_EFF = BIT(0), + ES58X_FLAG_RTR = BIT(1), + ES58X_FLAG_FD_BRS = BIT(3), + ES58X_FLAG_FD_ESI = BIT(5), + ES58X_FLAG_FD_DATA = BIT(6) +}; + +/** + * enum es58x_err - CAN error detection. + * @ES58X_ERR_OK: No errors. + * @ES58X_ERR_PROT_STUFF: Bit stuffing error: more than 5 consecutive + * equal bits. + * @ES58X_ERR_PROT_FORM: Frame format error. + * @ES58X_ERR_ACK: Received no ACK on transmission. + * @ES58X_ERR_PROT_BIT: Single bit error. + * @ES58X_ERR_PROT_CRC: Incorrect 15, 17 or 21 bits CRC. + * @ES58X_ERR_PROT_BIT1: Unable to send recessive bit: tried to send + * recessive bit 1 but monitored dominant bit 0. + * @ES58X_ERR_PROT_BIT0: Unable to send dominant bit: tried to send + * dominant bit 0 but monitored recessive bit 1. + * @ES58X_ERR_PROT_OVERLOAD: Bus overload. + * @ES58X_ERR_PROT_UNSPEC: Unspecified. + * + * Please refer to ISO 11898-1:2015, section 10.11 "Error detection" + * and section 10.13 "Overload signaling" for additional details. + */ +enum es58x_err { + ES58X_ERR_OK = 0, + ES58X_ERR_PROT_STUFF = BIT(0), + ES58X_ERR_PROT_FORM = BIT(1), + ES58X_ERR_ACK = BIT(2), + ES58X_ERR_PROT_BIT = BIT(3), + ES58X_ERR_PROT_CRC = BIT(4), + ES58X_ERR_PROT_BIT1 = BIT(5), + ES58X_ERR_PROT_BIT0 = BIT(6), + ES58X_ERR_PROT_OVERLOAD = BIT(7), + ES58X_ERR_PROT_UNSPEC = BIT(31) +}; + +/** + * enum es58x_event - CAN error codes returned by the device. + * @ES58X_EVENT_OK: No errors. + * @ES58X_EVENT_CRTL_ACTIVE: Active state: both TR and RX error count + * is less than 128. + * @ES58X_EVENT_CRTL_PASSIVE: Passive state: either TX or RX error + * count is greater than 127. + * @ES58X_EVENT_CRTL_WARNING: Warning state: either TX or RX error + * count is greater than 96. + * @ES58X_EVENT_BUSOFF: Bus off. + * @ES58X_EVENT_SINGLE_WIRE: Lost connection on either CAN high or CAN + * low. + * + * Please refer to ISO 11898-1:2015, section 12.1.4 "Rules of fault + * confinement" for additional details. + */ +enum es58x_event { + ES58X_EVENT_OK = 0, + ES58X_EVENT_CRTL_ACTIVE = BIT(0), + ES58X_EVENT_CRTL_PASSIVE = BIT(1), + ES58X_EVENT_CRTL_WARNING = BIT(2), + ES58X_EVENT_BUSOFF = BIT(3), + ES58X_EVENT_SINGLE_WIRE = BIT(4) +}; + +/* enum es58x_ret_u8 - Device return error codes, 8 bit format. + * + * Specific to ES581.4. 
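+ *
+ * These values are decoded by es58x_rx_cmd_ret_u8() in es58x_core.c,
+ * e.g. a bad CRC report is turned into -EIO:
+ *
+ *	case ES58X_RET_U8_ERR_BAD_CRC:
+ *		dev_err(dev, "%s: CRC of previous command is incorrect\n",
+ *			ret_desc);
+ *		return -EIO;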
+ */ +enum es58x_ret_u8 { + ES58X_RET_U8_OK = 0x00, + ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE = 0x80, + ES58X_RET_U8_ERR_NO_MEM = 0x81, + ES58X_RET_U8_ERR_BAD_CRC = 0x99 +}; + +/* enum es58x_ret_u32 - Device return error codes, 32 bit format. + */ +enum es58x_ret_u32 { + ES58X_RET_U32_OK = 0x00000000UL, + ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE = 0x80000000UL, + ES58X_RET_U32_ERR_NO_MEM = 0x80004001UL, + ES58X_RET_U32_WARN_PARAM_ADJUSTED = 0x40004000UL, + ES58X_RET_U32_WARN_TX_MAYBE_REORDER = 0x40004001UL, + ES58X_RET_U32_ERR_TIMEDOUT = 0x80000008UL, + ES58X_RET_U32_ERR_FIFO_FULL = 0x80003002UL, + ES58X_RET_U32_ERR_BAD_CONFIG = 0x80004000UL, + ES58X_RET_U32_ERR_NO_RESOURCE = 0x80004002UL +}; + +/* enum es58x_ret_type - Type of the command returned by the ES58X + * device. + */ +enum es58x_ret_type { + ES58X_RET_TYPE_SET_BITTIMING, + ES58X_RET_TYPE_ENABLE_CHANNEL, + ES58X_RET_TYPE_DISABLE_CHANNEL, + ES58X_RET_TYPE_TX_MSG, + ES58X_RET_TYPE_RESET_RX, + ES58X_RET_TYPE_RESET_TX, + ES58X_RET_TYPE_DEVICE_ERR +}; + +union es58x_urb_cmd { + struct es581_4_urb_cmd es581_4_urb_cmd; + struct es58x_fd_urb_cmd es58x_fd_urb_cmd; + struct { /* Common header parts of all variants */ + __le16 sof; + u8 cmd_type; + u8 cmd_id; + } __packed; + u8 raw_cmd[0]; +}; + +/** + * struct es58x_priv - All information specific to a CAN channel. + * @can: struct can_priv must be the first member (Socket CAN relies + * on the fact that function netdev_priv() returns a pointer to + * a struct can_priv). + * @es58x_dev: pointer to the corresponding ES58X device. + * @tx_urb: Used as a buffer to concatenate the TX messages and to do + * a bulk send. Please refer to es58x_start_xmit() for more + * details. + * @tx_tail: Index of the oldest packet still pending for + * completion. @tx_tail & echo_skb_mask represents the beginning + * of the echo skb FIFO, i.e. index of the first element. + * @tx_head: Index of the next packet to be sent to the + * device. @tx_head & echo_skb_mask represents the end of the + * echo skb FIFO plus one, i.e. the first free index. + * @tx_can_msg_cnt: Number of messages in @tx_urb. + * @tx_can_msg_is_fd: false: all messages in @tx_urb are Classical + * CAN, true: all messages in @tx_urb are CAN FD. Rationale: + * ES58X FD devices do not allow to mix Classical CAN and FD CAN + * frames in one single bulk transmission. + * @err_passive_before_rtx_success: The ES58X device might enter in a + * state in which it keeps alternating between error passive + * and active states. This counter keeps track of the number of + * error passive and if it gets bigger than + * ES58X_CONSECUTIVE_ERR_PASSIVE_MAX, es58x_rx_err_msg() will + * force the status to bus-off. + * @channel_idx: Channel index, starts at zero. + */ +struct es58x_priv { + struct can_priv can; + struct es58x_device *es58x_dev; + struct urb *tx_urb; + + u32 tx_tail; + u32 tx_head; + + u8 tx_can_msg_cnt; + bool tx_can_msg_is_fd; + + u8 err_passive_before_rtx_success; + + u8 channel_idx; +}; + +/** + * struct es58x_parameters - Constant parameters of a given hardware + * variant. + * @bittiming_const: Nominal bittimming constant parameters. + * @data_bittiming_const: Data bittiming constant parameters. + * @tdc_const: Transmission Delay Compensation constant parameters. + * @bitrate_max: Maximum bitrate supported by the device. + * @clock: CAN clock parameters. + * @ctrlmode_supported: List of supported modes. Please refer to + * can/netlink.h file for additional details. + * @tx_start_of_frame: Magic number at the beginning of each TX URB + * command. 
+ * @rx_start_of_frame: Magic number at the beginning of each RX URB + * command. + * @tx_urb_cmd_max_len: Maximum length of a TX URB command. + * @rx_urb_cmd_max_len: Maximum length of a RX URB command. + * @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head + * fields of the struct es58x_priv into echo_skb + * indexes. Properties: @fifo_mask = echo_skb_max - 1 where + * echo_skb_max must be a power of two. Also, echo_skb_max must + * not exceed the length of the device internal TX FIFO. This + * parameter is used to control the network queue + * wake/stop logic. + * @dql_min_limit: Dynamic Queue Limits (DQL) absolute minimum limit + * of bytes allowed to be queued on this network device transmit + * queue. Used by the Byte Queue Limits (BQL) to determine how + * frequently the xmit_more flag will be set to true in + * es58x_start_xmit(). Set this value higher to optimize for + * throughput but be aware that it might have a negative impact + * on the latency! This value can also be set dynamically. Please + * refer to Documentation/ABI/testing/sysfs-class-net-queues for + * more details. + * @tx_bulk_max: Maximum number of TX messages that can be sent in one + * single URB packet. + * @urb_cmd_header_len: Length of the URB command header. + * @rx_urb_max: Number of RX URBs to be allocated during device probe. + * @tx_urb_max: Number of TX URBs to be allocated during device probe. + */ +struct es58x_parameters { + const struct can_bittiming_const *bittiming_const; + const struct can_bittiming_const *data_bittiming_const; + const struct can_tdc_const *tdc_const; + u32 bitrate_max; + struct can_clock clock; + u32 ctrlmode_supported; + u16 tx_start_of_frame; + u16 rx_start_of_frame; + u16 tx_urb_cmd_max_len; + u16 rx_urb_cmd_max_len; + u16 fifo_mask; + u16 dql_min_limit; + u8 tx_bulk_max; + u8 urb_cmd_header_len; + u8 rx_urb_max; + u8 tx_urb_max; +}; + +/** + * struct es58x_operators - Function pointers used to encode/decode + * the TX/RX messages. + * @get_msg_len: Get field msg_len of the urb_cmd. The offset of + * msg_len inside urb_cmd depends on the device model. + * @handle_urb_cmd: Decode the URB command received from the device + * and dispatch it to the relevant sub function. + * @fill_urb_header: Fill the header of urb_cmd. + * @tx_can_msg: Encode a TX CAN message and add it to the bulk buffer + * cmd_buf of es58x_dev. + * @enable_channel: Start the CAN channel. + * @disable_channel: Stop the CAN channel. + * @reset_device: Full reset of the device. N.B: this feature is only + * present on the ES581.4. For ES58X FD devices, this field is + * set to NULL. + * @get_timestamp: Request a timestamp from the ES58X device. + */ +struct es58x_operators { + u16 (*get_msg_len)(const union es58x_urb_cmd *urb_cmd); + int (*handle_urb_cmd)(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd); + void (*fill_urb_header)(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 cmd_len); + int (*tx_can_msg)(struct es58x_priv *priv, const struct sk_buff *skb); + int (*enable_channel)(struct es58x_priv *priv); + int (*disable_channel)(struct es58x_priv *priv); + int (*reset_device)(struct es58x_device *es58x_dev); + int (*get_timestamp)(struct es58x_device *es58x_dev); +}; + +/** + * struct es58x_device - All information specific to an ES58X device. + * @dev: Device information. + * @udev: USB device information. + * @netdev: Array of our CAN channels. + * @param: The constant parameters. + * @ops: Operators. + * @rx_pipe: USB reception pipe.
+ * @tx_pipe: USB transmission pipe. + * @rx_urbs: Anchor for received URBs. + * @tx_urbs_busy: Anchor for TX URBs which were sent to the device. + * @tx_urbs_idle: Anchor for TX URBs which are idle. This driver + * allocates the memory for the URBs during the probe. When a TX + * URB is needed, it can be taken from this anchor. The network + * queue wake/stop logic should prevent this anchor from getting + * empty. Please refer to es58x_get_tx_urb() for more details. + * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle. + * @opened_channel_cnt: number of channels opened (c.f. es58x_open() + * and es58x_stop()). + * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns() + * was called. + * @realtime_diff_ns: difference in nanoseconds between the clocks of + * the ES58X device and the kernel. + * @timestamps: a temporary buffer to store the time stamps before + * feeding them to es58x_can_get_echo_skb(). Can only be used + * in RX branches. + * @rx_max_packet_size: Maximum length of bulk-in URB. + * @num_can_ch: Number of CAN channels (i.e. number of elements of @netdev). + * @rx_cmd_buf_len: Length of @rx_cmd_buf. + * @rx_cmd_buf: The device might split the URB commands into an + * arbitrary number of pieces. This buffer is used to concatenate + * all those pieces. Can only be used in RX branches. This field + * has to be the last one of the structure because it has a + * flexible size (c.f. es58x_sizeof_es58x_device() function). + */ +struct es58x_device { + struct device *dev; + struct usb_device *udev; + struct net_device *netdev[ES58X_NUM_CAN_CH_MAX]; + + const struct es58x_parameters *param; + const struct es58x_operators *ops; + + int rx_pipe; + int tx_pipe; + + struct usb_anchor rx_urbs; + struct usb_anchor tx_urbs_busy; + struct usb_anchor tx_urbs_idle; + atomic_t tx_urbs_idle_cnt; + atomic_t opened_channel_cnt; + + u64 ktime_req_ns; + s64 realtime_diff_ns; + + u64 timestamps[ES58X_ECHO_BULK_MAX]; + + u16 rx_max_packet_size; + u8 num_can_ch; + + u16 rx_cmd_buf_len; + union es58x_urb_cmd rx_cmd_buf; +}; + +/** + * es58x_sizeof_es58x_device() - Calculate the maximum length of + * struct es58x_device. + * @es58x_dev_param: The constant parameters of the device. + * + * The length of struct es58x_device depends on the length of its last + * field: rx_cmd_buf. This function allows optimizing the memory + * allocation. + * + * Return: length of struct es58x_device. + */ +static inline size_t es58x_sizeof_es58x_device(const struct es58x_parameters + *es58x_dev_param) +{ + return offsetof(struct es58x_device, rx_cmd_buf) + + es58x_dev_param->rx_urb_cmd_max_len; +} + +static inline int __es58x_check_msg_len(const struct device *dev, + const char *stringified_msg, + size_t actual_len, size_t expected_len) +{ + if (expected_len != actual_len) { + dev_err(dev, + "Length of %s is %zu but received command is %zu.\n", + stringified_msg, expected_len, actual_len); + return -EMSGSIZE; + } + return 0; +} + +/** + * es58x_check_msg_len() - Check the size of a received message. + * @dev: Device, used to print error messages. + * @msg: Received message, must not be a pointer. + * @actual_len: Length of the message as advertised in the command header. + * + * Must be a macro in order to accept the different types of messages + * as an input. Can be used with any of the messages which have a fixed + * length. Checks for an exact match of the size. + * + * Return: zero on success, -EMSGSIZE if @actual_len differs from the + expected length.
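+ *
+ * Minimal usage sketch (illustrative; it mirrors the existing call in
+ * es58x_fd_rx_event_msg()):
+ *
+ *	ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
+ *	if (ret)
+ *		return ret;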
+ */ +#define es58x_check_msg_len(dev, msg, actual_len) \ + __es58x_check_msg_len(dev, __stringify(msg), \ + actual_len, sizeof(msg)) + +static inline int __es58x_check_msg_max_len(const struct device *dev, + const char *stringified_msg, + size_t actual_len, + size_t expected_len) +{ + if (actual_len > expected_len) { + dev_err(dev, + "Maximum length for %s is %zu but received command is %zu.\n", + stringified_msg, expected_len, actual_len); + return -EOVERFLOW; + } + return 0; +} + +/** + * es58x_check_msg_max_len() - Check the maximum size of a received message. + * @dev: Device, used to print error messages. + * @msg: Received message, must not be a pointer. + * @actual_len: Length of the message as advertised in the command header. + * + * Must be a macro in order to accept the different types of messages + * as an input. To be used with the messages of variable sizes. Only + * check that the message is not bigger than the maximum expected + * size. + * + * Return: zero on success, -EOVERFLOW if @actual_len is greater than + * the expected length. + */ +#define es58x_check_msg_max_len(dev, msg, actual_len) \ + __es58x_check_msg_max_len(dev, __stringify(msg), \ + actual_len, sizeof(msg)) + +static inline int __es58x_msg_num_element(const struct device *dev, + const char *stringified_msg, + size_t actual_len, size_t msg_len, + size_t elem_len) +{ + size_t actual_num_elem = actual_len / elem_len; + size_t expected_num_elem = msg_len / elem_len; + + if (actual_num_elem == 0) { + dev_err(dev, + "Minimum length for %s is %zu but received command is %zu.\n", + stringified_msg, elem_len, actual_len); + return -EMSGSIZE; + } else if ((actual_len % elem_len) != 0) { + dev_err(dev, + "Received command length: %zu is not a multiple of %s[0]: %zu\n", + actual_len, stringified_msg, elem_len); + return -EMSGSIZE; + } else if (actual_num_elem > expected_num_elem) { + dev_err(dev, + "Array %s is supposed to have %zu elements each of size %zu...\n", + stringified_msg, expected_num_elem, elem_len); + dev_err(dev, + "... But received command has %zu elements (total length %zu).\n", + actual_num_elem, actual_len); + return -EOVERFLOW; + } + return actual_num_elem; +} + +/** + * es58x_msg_num_element() - Check size and give the number of + * elements in a message of array type. + * @dev: Device, used to print error messages. + * @msg: Received message, must be an array. + * @actual_len: Length of the message as advertised in the command + * header. + * + * Must be a macro in order to accept the different types of messages + * as an input. To be used on message of array type. Array's element + * has to be of fixed size (else use es58x_check_msg_max_len()). Check + * that the total length is an exact multiple of the length of a + * single element. + * + * Return: number of elements in the array on success, -EOVERFLOW if + * @actual_len is greater than the expected length, -EMSGSIZE if + * @actual_len is not a multiple of a single element. + */ +#define es58x_msg_num_element(dev, msg, actual_len) \ +({ \ + size_t __elem_len = sizeof((msg)[0]) + __must_be_array(msg); \ + __es58x_msg_num_element(dev, __stringify(msg), actual_len, \ + sizeof(msg), __elem_len); \ +}) + +/** + * es58x_priv() - Get the priv member and cast it to struct es58x_priv. + * @netdev: CAN network device. + * + * Return: ES58X device. 
+ */ +static inline struct es58x_priv *es58x_priv(struct net_device *netdev) +{ + return (struct es58x_priv *)netdev_priv(netdev); +} + +/** + * ES58X_SIZEOF_URB_CMD() - Calculate the maximum length of an urb + * command for a given message field name. + * @es58x_urb_cmd_type: type (either "struct es581_4_urb_cmd" or + * "struct es58x_fd_urb_cmd"). + * @msg_field: name of the message field. + * + * Must be a macro in order to accept the different command types as + * an input. + * + * Return: length of the urb command. + */ +#define ES58X_SIZEOF_URB_CMD(es58x_urb_cmd_type, msg_field) \ + (offsetof(es58x_urb_cmd_type, raw_msg) \ + + sizeof_field(es58x_urb_cmd_type, msg_field) \ + + sizeof_field(es58x_urb_cmd_type, \ + reserved_for_crc16_do_not_use)) + +/** + * es58x_get_urb_cmd_len() - Calculate the actual length of an urb + * command for a given message length. + * @es58x_dev: ES58X device. + * @msg_len: Length of the message. + * + * Add the header and CRC lengths to the message length. + * + * Return: length of the urb command. + */ +static inline size_t es58x_get_urb_cmd_len(struct es58x_device *es58x_dev, + u16 msg_len) +{ + return es58x_dev->param->urb_cmd_header_len + msg_len + sizeof(u16); +} + +/** + * es58x_get_netdev() - Get the network device. + * @es58x_dev: ES58X device. + * @channel_no: The channel number as advertised in the urb command. + * @channel_idx_offset: Some of the ES58x starts channel numbering + * from 0 (ES58X FD), others from 1 (ES581.4). + * @netdev: CAN network device. + * + * Do a sanity check on the index provided by the device. + * + * Return: zero on success, -ECHRNG if the received channel number is + * out of range and -ENODEV if the network device is not yet + * configured. + */ +static inline int es58x_get_netdev(struct es58x_device *es58x_dev, + int channel_no, int channel_idx_offset, + struct net_device **netdev) +{ + int channel_idx = channel_no - channel_idx_offset; + + *netdev = NULL; + if (channel_idx < 0 || channel_idx >= es58x_dev->num_can_ch) + return -ECHRNG; + + *netdev = es58x_dev->netdev[channel_idx]; + if (!*netdev || !netif_device_present(*netdev)) + return -ENODEV; + + return 0; +} + +/** + * es58x_get_raw_can_id() - Get the CAN ID. + * @cf: CAN frame. + * + * Mask the CAN ID in order to only keep the significant bits. + * + * Return: the raw value of the CAN ID. + */ +static inline int es58x_get_raw_can_id(const struct can_frame *cf) +{ + if (cf->can_id & CAN_EFF_FLAG) + return cf->can_id & CAN_EFF_MASK; + else + return cf->can_id & CAN_SFF_MASK; +} + +/** + * es58x_get_flags() - Get the CAN flags. + * @skb: socket buffer of a CAN message. + * + * Return: the CAN flag as an enum es58x_flag. 
+ */ +static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + enum es58x_flag es58x_flags = 0; + + if (cf->can_id & CAN_EFF_FLAG) + es58x_flags |= ES58X_FLAG_EFF; + + if (can_is_canfd_skb(skb)) { + es58x_flags |= ES58X_FLAG_FD_DATA; + if (cf->flags & CANFD_BRS) + es58x_flags |= ES58X_FLAG_FD_BRS; + if (cf->flags & CANFD_ESI) + es58x_flags |= ES58X_FLAG_FD_ESI; + } else if (cf->can_id & CAN_RTR_FLAG) + /* Remote frames are only defined in Classical CAN frames */ + es58x_flags |= ES58X_FLAG_RTR; + + return es58x_flags; +} + +int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx, + u64 *tstamps, unsigned int pkts); +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32); +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc); +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum es58x_event event, u64 timestamp); +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp); +int es58x_rx_cmd_ret_u8(struct device *dev, enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8); +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32); +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 cmd_len, int channel_idx); + +extern const struct es58x_parameters es581_4_param; +extern const struct es58x_operators es581_4_ops; + +extern const struct es58x_parameters es58x_fd_param; +extern const struct es58x_operators es58x_fd_ops; + +#endif /* __ES58X_COMMON_H__ */ diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c new file mode 100644 index 000000000000..1a2779d383a4 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_fd.c: Adds support to ETAS ES582.1 and ES584.1 (naming + * convention: we use the term "ES58X FD" when referring to those two + * variants together). + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" +#include "es58x_fd.h" + +/** + * es58x_fd_sizeof_rx_tx_msg() - Calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc and a len fields. + * + * Even if RTR frames have actually no payload, the ES58X devices + * still expect it. Must be a macro in order to accept several types + * (struct es58x_fd_tx_can_msg and struct es58x_fd_rx_can_msg) as an + * input. + * + * Return: length of the message. 
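+ *
+ * For instance (illustrative numbers): a Classical CAN message with
+ * dlc = 8 evaluates to offsetof(typeof(msg), data[8]), whereas a CAN
+ * FD message with len = 10 is first rounded up by
+ * canfd_sanitize_len() to the next valid CAN FD length, i.e. 12.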
+ */ +#define es58x_fd_sizeof_rx_tx_msg(msg) \ +({ \ + typeof(msg) __msg = (msg); \ + size_t __msg_len; \ + \ + if (__msg.flags & ES58X_FLAG_FD_DATA) \ + __msg_len = canfd_sanitize_len(__msg.len); \ + else \ + __msg_len = can_cc_dlc2len(__msg.dlc); \ + \ + offsetof(typeof(__msg), data[__msg_len]); \ +}) + +static enum es58x_fd_cmd_type es58x_fd_cmd_type(struct net_device *netdev) +{ + u32 ctrlmode = es58x_priv(netdev)->can.ctrlmode; + + if (ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) + return ES58X_FD_CMD_TYPE_CANFD; + else + return ES58X_FD_CMD_TYPE_CAN; +} + +static u16 es58x_fd_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es58x_fd_urb_cmd.msg_len); +} + +static int es58x_fd_echo_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_fd_echo_msg *echo_msg; + struct es58x_device *es58x_dev = priv->es58x_dev; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int i, num_element; + u32 rcv_packet_idx; + + const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8); + + num_element = es58x_msg_num_element(es58x_dev->dev, + es58x_fd_urb_cmd->echo_msg, + msg_len); + if (num_element < 0) + return num_element; + echo_msg = es58x_fd_urb_cmd->echo_msg; + + rcv_packet_idx = (priv->tx_tail & mask) | echo_msg[0].packet_idx; + for (i = 0; i < num_element; i++) { + if ((u8)rcv_packet_idx != echo_msg[i].packet_idx) { + netdev_err(netdev, "Packet idx jumped from %u to %u\n", + (u8)rcv_packet_idx - 1, + echo_msg[i].packet_idx); + return -EBADMSG; + } + + tstamps[i] = get_unaligned_le64(&echo_msg[i].timestamp); + rcv_packet_idx++; + } + + return es58x_can_get_echo_skb(netdev, priv->tx_tail, tstamps, num_element); +} + +static int es58x_fd_rx_can_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const u8 *rx_can_msg_buf = es58x_fd_urb_cmd->rx_can_msg_buf; + u16 rx_can_msg_buf_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int pkts, ret; + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_can_msg_buf, + rx_can_msg_buf_len); + if (ret) + return ret; + + for (pkts = 0; rx_can_msg_buf_len > 0; pkts++) { + const struct es58x_fd_rx_can_msg *rx_can_msg = + (const struct es58x_fd_rx_can_msg *)rx_can_msg_buf; + bool is_can_fd = !!(rx_can_msg->flags & ES58X_FLAG_FD_DATA); + /* rx_can_msg_len is the length of the rx_can_msg + * buffer. Not to be confused with rx_can_msg->len + * which is the length of the CAN payload + * rx_can_msg->data. 
+ */ + u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg); + + if (rx_can_msg_len > rx_can_msg_buf_len) { + netdev_err(netdev, + "%s: Expected a rx_can_msg of size %d but only %d bytes are left in rx_can_msg_buf\n", + __func__, + rx_can_msg_len, rx_can_msg_buf_len); + return -EMSGSIZE; + } + if (rx_can_msg->len > CANFD_MAX_DLEN) { + netdev_err(netdev, + "%s: Data length is %d but maximum should be %d\n", + __func__, rx_can_msg->len, CANFD_MAX_DLEN); + return -EMSGSIZE; + } + + if (netif_running(netdev)) { + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + u8 dlc; + + if (is_can_fd) + dlc = can_fd_len2dlc(rx_can_msg->len); + else + dlc = rx_can_msg->dlc; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, dlc); + if (ret) + break; + } + + rx_can_msg_buf_len -= rx_can_msg_len; + rx_can_msg_buf += rx_can_msg_len; + } + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets\n", + __func__, netdev->name, pkts); + netdev->stats.rx_dropped += pkts; + } + + return ret; +} + +static int es58x_fd_rx_event_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + const struct es58x_fd_rx_event_msg *rx_event_msg; + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); + if (ret) + return ret; + + rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; + + return es58x_rx_err_msg(netdev, rx_event_msg->error_code, + rx_event_msg->event_code, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es58x_fd_rx_cmd_ret_u32(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd, + enum es58x_ret_type cmd_ret_type) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_cmd_ret_le32, msg_len); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, cmd_ret_type, + get_unaligned_le32(&es58x_fd_urb_cmd->rx_cmd_ret_le32)); +} + +static int es58x_fd_tx_ack_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const struct es58x_fd_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es58x_fd_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + get_unaligned_le32(&tx_ack_msg->rx_cmd_ret_le32)); +} + +static int es58x_fd_can_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct net_device *netdev; + int ret; + + ret = es58x_get_netdev(es58x_dev, es58x_fd_urb_cmd->channel_idx, + ES58X_FD_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + switch ((enum es58x_fd_can_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_ENABLE_CHANNEL); + + case ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_DISABLE_CHANNEL); + + case ES58X_FD_CAN_CMD_ID_TX_MSG: + 
return es58x_fd_tx_ack_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_ECHO_MSG: + return es58x_fd_echo_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_RX_MSG: + return es58x_fd_rx_can_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_RESET_RX: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_RESET_RX); + + case ES58X_FD_CAN_CMD_ID_RESET_TX: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_RESET_TX); + + case ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG: + return es58x_fd_rx_event_msg(netdev, es58x_fd_urb_cmd); + + default: + return -EBADRQC; + } +} + +static int es58x_fd_device_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + switch ((enum es58x_fd_dev_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case ES58X_FD_DEV_CMD_ID_TIMESTAMP: + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->timestamp, msg_len); + if (ret) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es58x_fd_urb_cmd->timestamp)); + return 0; + + default: + return -EBADRQC; + } +} + +static int es58x_fd_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd; + int ret; + + es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + switch ((enum es58x_fd_cmd_type)es58x_fd_urb_cmd->cmd_type) { + case ES58X_FD_CMD_TYPE_CAN: + case ES58X_FD_CMD_TYPE_CANFD: + ret = es58x_fd_can_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + case ES58X_FD_CMD_TYPE_DEVICE: + ret = es58x_fd_device_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + default: + ret = -EBADRQC; + break; + } + + if (ret == -EBADRQC) + dev_err(es58x_dev->dev, + "%s: Unknown command type (0x%02X) and command ID (0x%02X) combination\n", + __func__, es58x_fd_urb_cmd->cmd_type, + es58x_fd_urb_cmd->cmd_id); + + return ret; +} + +static void es58x_fd_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + es58x_fd_urb_cmd->SOF = cpu_to_le16(es58x_fd_param.tx_start_of_frame); + es58x_fd_urb_cmd->cmd_type = cmd_type; + es58x_fd_urb_cmd->cmd_id = cmd_id; + es58x_fd_urb_cmd->channel_idx = channel_idx; + es58x_fd_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es58x_fd_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es58x_fd_tx_can_msg *tx_can_msg; + bool is_fd = can_is_canfd_skb(skb); + u16 msg_len; + int ret; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 0; + es58x_fd_fill_urb_header(urb_cmd, + is_fd ? ES58X_FD_CMD_TYPE_CANFD + : ES58X_FD_CMD_TYPE_CAN, + ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK, + priv->channel_idx, msg_len); + } else { + msg_len = es58x_fd_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->tx_can_msg_buf, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* Fill message contents. 
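+ *
+ * packet_idx is the low byte of @tx_head so that the echo messages
+ * returned by the device can later be matched against the echo skb
+ * FIFO (see es58x_fd_echo_msg() above); the CAN ID, flags, DLC or
+ * length and payload are taken from the skb.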
*/ + tx_can_msg = (struct es58x_fd_tx_can_msg *) + &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len]; + tx_can_msg->packet_idx = (u8)priv->tx_head; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + tx_can_msg->flags = (u8)es58x_get_flags(skb); + if (is_fd) + tx_can_msg->len = cf->len; + else + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* Calculate new sizes */ + msg_len += es58x_fd_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + put_unaligned_le16(msg_len, &es58x_fd_urb_cmd->msg_len); + + return 0; +} + +static void es58x_fd_convert_bittiming(struct es58x_fd_bittiming *es58x_fd_bt, + struct can_bittiming *bt) +{ + /* The actual value set in the hardware registers is one less + * than the functional value. + */ + const int offset = 1; + + es58x_fd_bt->bitrate = cpu_to_le32(bt->bitrate); + es58x_fd_bt->tseg1 = + cpu_to_le16(bt->prop_seg + bt->phase_seg1 - offset); + es58x_fd_bt->tseg2 = cpu_to_le16(bt->phase_seg2 - offset); + es58x_fd_bt->brp = cpu_to_le16(bt->brp - offset); + es58x_fd_bt->sjw = cpu_to_le16(bt->sjw - offset); +} + +static int es58x_fd_enable_channel(struct es58x_priv *priv) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + struct net_device *netdev = es58x_dev->netdev[priv->channel_idx]; + struct es58x_fd_tx_conf_msg tx_conf_msg = { 0 }; + u32 ctrlmode; + size_t conf_len = 0; + + es58x_fd_convert_bittiming(&tx_conf_msg.nominal_bittiming, + &priv->can.bittiming); + ctrlmode = priv->can.ctrlmode; + + if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) + tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_THREE; + else + tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_ONE; + tx_conf_msg.sync_edge = ES58X_SYNC_EDGE_SINGLE; + tx_conf_msg.physical_layer = ES58X_PHYSICAL_LAYER_HIGH_SPEED; + tx_conf_msg.echo_mode = ES58X_ECHO_ON; + if (ctrlmode & CAN_CTRLMODE_LISTENONLY) + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_PASSIVE; + else + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_ACTIVE; + + if (ctrlmode & CAN_CTRLMODE_FD_NON_ISO) { + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD_NON_ISO; + tx_conf_msg.canfd_enabled = 1; + } else if (ctrlmode & CAN_CTRLMODE_FD) { + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD; + tx_conf_msg.canfd_enabled = 1; + } + + if (tx_conf_msg.canfd_enabled) { + es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, + &priv->can.data_bittiming); + + if (priv->can.tdc.tdco) { + tx_conf_msg.tdc_enabled = 1; + tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco); + tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf); + } + + conf_len = ES58X_FD_CANFD_CONF_LEN; + } else { + conf_len = ES58X_FD_CAN_CONF_LEN; + } + + return es58x_send_msg(es58x_dev, es58x_fd_cmd_type(netdev), + ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL, + &tx_conf_msg, conf_len, priv->channel_idx); +} + +static int es58x_fd_disable_channel(struct es58x_priv *priv) +{ + /* The type (ES58X_FD_CMD_TYPE_CAN or ES58X_FD_CMD_TYPE_CANFD) does + * not matter here. 
+ */ + return es58x_send_msg(priv->es58x_dev, ES58X_FD_CMD_TYPE_CAN, + ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL, + ES58X_EMPTY_MSG, 0, priv->channel_idx); +} + +static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES58X_FD_CMD_TYPE_DEVICE, + ES58X_FD_DEV_CMD_ID_TIMESTAMP, ES58X_EMPTY_MSG, + 0, ES58X_CHANNEL_IDX_NA); +} + +/* Nominal bittiming constants for ES582.1 and ES584.1 as specified in + * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" + * section 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" + * from Microchip. + * + * The values from the specification are the hardware register + * values. To convert them to the functional values, all ranges were + * incremented by 1 (e.g. range [0..n-1] changed to [1..n]). + */ +static const struct can_bittiming_const es58x_fd_nom_bittiming_const = { + .name = "ES582.1/ES584.1", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +/* Data bittiming constants for ES582.1 and ES584.1 as specified in + * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" + * section 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from + * Microchip. + */ +static const struct can_bittiming_const es58x_fd_data_bittiming_const = { + .name = "ES582.1/ES584.1", + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1 +}; + +/* Transmission Delay Compensation constants for ES582.1 and ES584.1 + * as specified in the microcontroller datasheet: "SAM + * E70/S70/V70/V71 Family" section 49.6.15 "MCAN Transmitter Delay + * Compensation Register" from Microchip. + */ +static const struct can_tdc_const es58x_tdc_const = { + .tdcv_max = 0, /* Manual mode not supported. */ + .tdco_max = 127, + .tdcf_max = 127 +}; + +const struct es58x_parameters es58x_fd_param = { + .bittiming_const = &es58x_fd_nom_bittiming_const, + .data_bittiming_const = &es58x_fd_data_bittiming_const, + .tdc_const = &es58x_tdc_const, + /* The devices use NXP TJA1044G transceivers which guarantee + * the timing for data rates up to 5 Mbps. Bitrates up to 8 + * Mbps work in an optimal environment but are not recommended + * for production environments. + */ + .bitrate_max = 8 * CAN_MBPS, + .clock = {.freq = 80 * CAN_MHZ}, + .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_CC_LEN8_DLC, + .tx_start_of_frame = 0xCEFA, /* FACE in little endian */ + .rx_start_of_frame = 0xFECA, /* CAFE in little endian */ + .tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN, + .rx_urb_cmd_max_len = ES58X_FD_RX_URB_CMD_MAX_LEN, + /* Size of internal device TX queue is 500. + * + * However, when reaching a value around 278, the device's busy + * LED turns on and thus the maximum value of 500 is never reached + * in practice. Also, when this value is too high, some errors + * on the echo_msg were witnessed when the device was + * recovering from bus off. + * + * For above reasons, a value that would prevent the device + * from becoming busy was chosen. In practice, BQL would + * prevent the value from even getting close to this + * maximum, so no impact on performance was measured. + */ + .fifo_mask = 255, /* echo_skb_max = 256 */ + .dql_min_limit = CAN_FRAME_LEN_MAX * 15, /* Empirical value.
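+ *
+ * (i.e. enough bytes for 15 maximum-length Classical CAN
+ * frames; BQL will not let the transmit queue limit drop
+ * below this floor, c.f. the @dql_min_limit documentation
+ * in es58x_core.h)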
*/ + .tx_bulk_max = ES58X_FD_TX_BULK_MAX, + .urb_cmd_header_len = ES58X_FD_URB_CMD_HEADER_LEN, + .rx_urb_max = ES58X_RX_URBS_MAX, + .tx_urb_max = ES58X_TX_URBS_MAX +}; + +const struct es58x_operators es58x_fd_ops = { + .get_msg_len = es58x_fd_get_msg_len, + .handle_urb_cmd = es58x_fd_handle_urb_cmd, + .fill_urb_header = es58x_fd_fill_urb_header, + .tx_can_msg = es58x_fd_tx_can_msg, + .enable_channel = es58x_fd_enable_channel, + .disable_channel = es58x_fd_disable_channel, + .reset_device = NULL, /* Not implemented in the device firmware. */ + .get_timestamp = es58x_fd_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h new file mode 100644 index 000000000000..ee18a87e40c0 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_fd.h: Definitions and declarations specific to ETAS + * ES582.1 and ES584.1 (naming convention: we use the term "ES58X FD" + * when referring to those two variants together). + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES58X_FD_H__ +#define __ES58X_FD_H__ + +#include <linux/types.h> + +#define ES582_1_NUM_CAN_CH 2 +#define ES584_1_NUM_CAN_CH 1 +#define ES58X_FD_NUM_CAN_CH 2 +#define ES58X_FD_CHANNEL_IDX_OFFSET 0 + +#define ES58X_FD_TX_BULK_MAX 100 +#define ES58X_FD_RX_BULK_MAX 100 +#define ES58X_FD_ECHO_BULK_MAX 100 + +enum es58x_fd_cmd_type { + ES58X_FD_CMD_TYPE_CAN = 0x03, + ES58X_FD_CMD_TYPE_CANFD = 0x04, + ES58X_FD_CMD_TYPE_DEVICE = 0xFF +}; + +/* Command IDs for ES58X_FD_CMD_TYPE_{CAN,CANFD}. */ +enum es58x_fd_can_cmd_id { + ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL = 0x01, + ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL = 0x02, + ES58X_FD_CAN_CMD_ID_TX_MSG = 0x05, + ES58X_FD_CAN_CMD_ID_ECHO_MSG = 0x07, + ES58X_FD_CAN_CMD_ID_RX_MSG = 0x10, + ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG = 0x11, + ES58X_FD_CAN_CMD_ID_RESET_RX = 0x20, + ES58X_FD_CAN_CMD_ID_RESET_TX = 0x21, + ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK = 0x55 +}; + +/* Command IDs for ES58X_FD_CMD_TYPE_DEVICE. */ +enum es58x_fd_dev_cmd_id { + ES58X_FD_DEV_CMD_ID_GETTIMETICKS = 0x01, + ES58X_FD_DEV_CMD_ID_TIMESTAMP = 0x02 +}; + +/** + * enum es58x_fd_ctrlmode - Controller mode. + * @ES58X_FD_CTRLMODE_ACTIVE: send and receive messages. + * @ES58X_FD_CTRLMODE_PASSIVE: only receive messages (monitor). Do not + * send anything, not even the acknowledgment bit. + * @ES58X_FD_CTRLMODE_FD: CAN FD according to ISO11898-1. + * @ES58X_FD_CTRLMODE_FD_NON_ISO: follow Bosch CAN FD Specification + * V1.0 + * @ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING: How to + * behave when CAN FD reserved bit is monitored as + * dominant. (c.f. ISO 11898-1:2015, section 10.4.2.4 "Control + * field", paragraph "r0 bit"). 0 (not disable = enable): send + * error frame. 1 (disable): goes into bus integration mode + * (c.f. below). + * @ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION: 0: Edge + * filtering is disabled. 1: Edge filtering is enabled. Two + * consecutive dominant bits required to detect an edge for hard + * synchronization. 
+ */ +enum es58x_fd_ctrlmode { + ES58X_FD_CTRLMODE_ACTIVE = 0, + ES58X_FD_CTRLMODE_PASSIVE = BIT(0), + ES58X_FD_CTRLMODE_FD = BIT(4), + ES58X_FD_CTRLMODE_FD_NON_ISO = BIT(5), + ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING = BIT(6), + ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION = BIT(7) +}; + +struct es58x_fd_bittiming { + __le32 bitrate; + __le16 tseg1; /* range: [tseg1_min-1..tseg1_max-1] */ + __le16 tseg2; /* range: [tseg2_min-1..tseg2_max-1] */ + __le16 brp; /* range: [brp_min-1..brp_max-1] */ + __le16 sjw; /* range: [0..sjw_max-1] */ +} __packed; + +/** + * struct es58x_fd_tx_conf_msg - Channel configuration. + * @nominal_bittiming: Nominal bittiming. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @ctrlmode: type enum es58x_fd_ctrlmode. + * @canfd_enabled: boolean (0: Classical CAN, 1: CAN and/or CANFD). + * @data_bittiming: Bittiming for flexible data-rate transmission. + * @tdc_enabled: Transmitter Delay Compensation switch (0: disabled, + * 1: enabled). On very high bitrates, the delay between when the + * bit is sent and received on the CANTX and CANRX pins of the + * transceiver starts to be significant enough for errors to occur + * and thus needs to be compensated. + * @tdco: Transmitter Delay Compensation Offset. Offset value, in time + * quanta, defining the delay between the start of the bit + * reception on the CANRX pin of the transceiver and the SSP + * (Secondary Sample Point). Valid values: 0 to 127. + * @tdcf: Transmitter Delay Compensation Filter window. Defines the + * minimum value for the SSP position, in time quanta. The + * feature is enabled when TDCF is configured to a value greater + * than TDCO. Valid values: 0 to 127. + * + * Please refer to the microcontroller datasheet: "SAM + * E70/S70/V70/V71 Family" section 49 "Controller Area Network + * (MCAN)" for additional information.
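+ *
+ * In this driver, es58x_fd_enable_channel() fills @tdc_enabled, @tdco
+ * and @tdcf from priv->can.tdc whenever a non-zero TDCO has been
+ * configured (c.f. es58x_fd.c).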
+ */ +struct es58x_fd_tx_conf_msg { + struct es58x_fd_bittiming nominal_bittiming; + u8 samples_per_bit; + u8 sync_edge; + u8 physical_layer; + u8 echo_mode; + u8 ctrlmode; + u8 canfd_enabled; + struct es58x_fd_bittiming data_bittiming; + u8 tdc_enabled; + __le16 tdco; + __le16 tdcf; +} __packed; + +#define ES58X_FD_CAN_CONF_LEN \ + (offsetof(struct es58x_fd_tx_conf_msg, canfd_enabled)) +#define ES58X_FD_CANFD_CONF_LEN (sizeof(struct es58x_fd_tx_conf_msg)) + +struct es58x_fd_tx_can_msg { + u8 packet_idx; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CAN */ + u8 len; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CANFD */ + } __packed; + u8 data[CANFD_MAX_DLEN]; +} __packed; + +#define ES58X_FD_CAN_TX_LEN \ + (offsetof(struct es58x_fd_tx_can_msg, data[CAN_MAX_DLEN])) +#define ES58X_FD_CANFD_TX_LEN (sizeof(struct es58x_fd_tx_can_msg)) + +struct es58x_fd_rx_can_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CAN */ + u8 len; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CANFD */ + } __packed; + u8 data[CANFD_MAX_DLEN]; +} __packed; + +#define ES58X_FD_CAN_RX_LEN \ + (offsetof(struct es58x_fd_rx_can_msg, data[CAN_MAX_DLEN])) +#define ES58X_FD_CANFD_RX_LEN (sizeof(struct es58x_fd_rx_can_msg)) + +struct es58x_fd_echo_msg { + __le64 timestamp; + u8 packet_idx; +} __packed; + +struct es58x_fd_rx_event_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; /* type enum es58x_flag */ + u8 error_type; /* 0: event, 1: error */ + u8 error_code; + u8 event_code; +} __packed; + +struct es58x_fd_tx_ack_msg { + __le32 rx_cmd_ret_le32; /* type enum es58x_cmd_ret_code_u32 */ + __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */ +} __packed; + +/** + * struct es58x_fd_urb_cmd - Commands received from or sent to the + * ES58X FD device. + * @SOF: Start of Frame. + * @cmd_type: Command Type (type: enum es58x_fd_cmd_type). The CRC + * calculation starts at this position. + * @cmd_id: Command ID (type: enum es58x_fd_cmd_id). + * @channel_idx: Channel index starting at 0. + * @msg_len: Length of the message, excluding CRC (i.e. length of the + * union). + * @tx_conf_msg: Channel configuration. + * @tx_can_msg_buf: Concatenation of Tx messages. Type is "u8[]" + * instead of "struct es58x_fd_tx_msg[]" because the structure + * has a flexible size. + * @rx_can_msg_buf: Concatenation Rx messages. Type is "u8[]" instead + * of "struct es58x_fd_rx_msg[]" because the structure has a + * flexible size. + * @echo_msg: Array of echo messages (e.g. Tx messages being looped + * back). + * @rx_event_msg: Error or event message. + * @tx_ack_msg: Tx acknowledgment message. + * @timestamp: Timestamp reply. + * @rx_cmd_ret_le32: Rx 32 bits return code (type: enum + * es58x_cmd_ret_code_u32). + * @raw_msg: Message raw payload. + * @reserved_for_crc16_do_not_use: The structure ends with a + * CRC16. Because the structures in above union are of variable + * lengths, we can not predict the offset of the CRC in + * advance. Use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. 
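+ *
+ * Overall layout (informative): the fixed header (@SOF up to and
+ * including @msg_len, i.e. ES58X_FD_URB_CMD_HEADER_LEN bytes) is
+ * followed by @msg_len bytes of union payload and then by the CRC16,
+ * which is why es58x_get_urb_cmd_len() returns
+ * urb_cmd_header_len + msg_len + sizeof(u16).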
+ */ +struct es58x_fd_urb_cmd { + __le16 SOF; + u8 cmd_type; + u8 cmd_id; + u8 channel_idx; + __le16 msg_len; + + union { + struct es58x_fd_tx_conf_msg tx_conf_msg; + u8 tx_can_msg_buf[ES58X_FD_TX_BULK_MAX * ES58X_FD_CANFD_TX_LEN]; + u8 rx_can_msg_buf[ES58X_FD_RX_BULK_MAX * ES58X_FD_CANFD_RX_LEN]; + struct es58x_fd_echo_msg echo_msg[ES58X_FD_ECHO_BULK_MAX]; + struct es58x_fd_rx_event_msg rx_event_msg; + struct es58x_fd_tx_ack_msg tx_ack_msg; + __le64 timestamp; + __le32 rx_cmd_ret_le32; + u8 raw_msg[0]; + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define ES58X_FD_URB_CMD_HEADER_LEN (offsetof(struct es58x_fd_urb_cmd, raw_msg)) +#define ES58X_FD_TX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, tx_can_msg_buf) +#define ES58X_FD_RX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, rx_can_msg_buf) + +#endif /* __ES58X_FD_H__ */ diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index a00dc1904415..5e892bef46b0 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -533,7 +533,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, if (unlikely(rc)) { /* usb send failed */ atomic_dec(&dev->active_tx_urbs); - can_free_echo_skb(netdev, idx); + can_free_echo_skb(netdev, idx, NULL); gs_free_tx_context(txc); usb_unanchor_urb(urb); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 4e97da8434ab..90ebcae13409 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -593,7 +593,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (unlikely(err)) { spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 1f649d178010..029e77dfa773 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -364,7 +364,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; xmit_failed: - can_free_echo_skb(priv->netdev, ctx->ndx); + can_free_echo_skb(priv->netdev, ctx->ndx, NULL); mcba_usb_free_ctx(ctx); dev_kfree_skb(skb); stats->tx_dropped++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index e393e8457d77..1d6f77252f01 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -11,6 +11,7 @@ #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/module.h> +#include <linux/ethtool.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -40,6 +41,7 @@ #define PCAN_USB_CMD_REGISTER 9 #define PCAN_USB_CMD_EXT_VCC 10 #define PCAN_USB_CMD_ERR_FR 11 +#define PCAN_USB_CMD_LED 12 /* PCAN_USB_CMD_SET_BUS number arg */ #define PCAN_USB_BUS_XCVER 2 @@ -248,6 +250,15 @@ static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff) return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args); } +static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff) +{ + u8 args[PCAN_USB_CMD_ARGS_LEN] = { + [0] = !!onoff, + }; + + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_LED, PCAN_USB_SET, args); +} + /* * set bittiming value to can */ @@ -354,16 +365,11 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 
*serial_number) int err; err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args); - if (err) { - netdev_err(dev->netdev, "getting serial failure: %d\n", err); - } else if (serial_number) { - __le32 tmp32; - - memcpy(&tmp32, args, 4); - *serial_number = le32_to_cpu(tmp32); - } + if (err) + return err; + *serial_number = le32_to_cpup((__le32 *)args); - return err; + return 0; } /* @@ -377,8 +383,8 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args); if (err) netdev_err(dev->netdev, "getting device id failure: %d\n", err); - else if (device_id) - *device_id = args[0]; + + *device_id = args[0]; return err; } @@ -388,14 +394,10 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) */ static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) { - __le16 tmp16; - - if ((mc->ptr+2) > mc->end) + if ((mc->ptr + 2) > mc->end) return -EINVAL; - memcpy(&tmp16, mc->ptr, 2); - - mc->ts16 = le16_to_cpu(tmp16); + mc->ts16 = get_unaligned_le16(mc->ptr); if (mc->rec_idx > 0) peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16); @@ -412,16 +414,13 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) { /* only 1st packet supplies a word timestamp */ if (first_packet) { - __le16 tmp16; - if ((mc->ptr + 2) > mc->end) return -EINVAL; - memcpy(&tmp16, mc->ptr, 2); - mc->ptr += 2; - - mc->ts16 = le16_to_cpu(tmp16); + mc->ts16 = get_unaligned_le16(mc->ptr); mc->prev_ts8 = mc->ts16 & 0x00ff; + + mc->ptr += 2; } else { u8 ts8; @@ -711,25 +710,17 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { - __le32 tmp32; - if ((mc->ptr + 4) > mc->end) goto decode_failed; - memcpy(&tmp32, mc->ptr, 4); + cf->can_id = get_unaligned_le32(mc->ptr) >> 3 | CAN_EFF_FLAG; mc->ptr += 4; - - cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG; } else { - __le16 tmp16; - if ((mc->ptr + 2) > mc->end) goto decode_failed; - memcpy(&tmp16, mc->ptr, 2); + cf->can_id = get_unaligned_le16(mc->ptr) >> 5; mc->ptr += 2; - - cf->can_id = le16_to_cpu(tmp16) >> 5; } can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode); @@ -843,15 +834,15 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, /* can id */ if (cf->can_id & CAN_EFF_FLAG) { - __le32 tmp32 = cpu_to_le32((cf->can_id & CAN_ERR_MASK) << 3); - *pc |= PCAN_USB_STATUSLEN_EXT_ID; - memcpy(++pc, &tmp32, 4); + pc++; + + put_unaligned_le32((cf->can_id & CAN_ERR_MASK) << 3, pc); pc += 4; } else { - __le16 tmp16 = cpu_to_le16((cf->can_id & CAN_ERR_MASK) << 5); + pc++; - memcpy(++pc, &tmp16, 2); + put_unaligned_le16((cf->can_id & CAN_ERR_MASK) << 5, pc); pc += 2; } @@ -971,6 +962,40 @@ static int pcan_usb_probe(struct usb_interface *intf) return 0; } +static int pcan_usb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* call ON/OFF twice a second */ + return 2; + + case ETHTOOL_ID_OFF: + err = pcan_usb_set_led(dev, 0); + break; + + case ETHTOOL_ID_ON: + fallthrough; + + case ETHTOOL_ID_INACTIVE: + /* restore LED default */ + err = pcan_usb_set_led(dev, 1); + break; + + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_ethtool_ops = { + .set_phys_id = pcan_usb_set_phys_id, +}; + /* * describe the PCAN-USB adapter */ @@ 
-994,16 +1019,17 @@ const struct peak_usb_adapter pcan_usb = { CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { - .freq = PCAN_USB_CRYSTAL_HZ / 2 , + .freq = PCAN_USB_CRYSTAL_HZ / 2, }, .bittiming_const = &pcan_usb_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb), + .ethtool_ops = &pcan_usb_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 16, - .ts_period = 24575, /* calibration period in ts. */ .us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */ .us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */ diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 28e916a04047..e8f43ed90b72 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> +#include <linux/ethtool.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -26,28 +27,32 @@ MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ -static struct usb_device_id peak_usb_table[] = { - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, - {} /* Terminating entry */ +static const struct usb_device_id peak_usb_table[] = { + { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_pro, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_fd, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_pro_fd, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_chip, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_x6, + }, { + /* Terminating entry */ + } }; MODULE_DEVICE_TABLE(usb, peak_usb_table); -/* List of supported PCAN-USB adapters (NULL terminated list) */ -static const struct peak_usb_adapter *const peak_usb_adapters_list[] = { - &pcan_usb, - &pcan_usb_pro, - &pcan_usb_fd, - &pcan_usb_pro_fd, - &pcan_usb_chip, - &pcan_usb_x6, -}; - /* * dump memory */ @@ -371,7 +376,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); @@ -623,6 +628,7 @@ static int peak_usb_ndo_stop(struct net_device *netdev) /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); + if (err) return err; } @@ -820,6 +826,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, netdev->flags |= IFF_ECHO; /* we support local echo */ + /* add ethtool support */ + netdev->ethtool_ops = peak_usb_adapter->ethtool_ops; + init_usb_anchor(&dev->rx_submitted); init_usb_anchor(&dev->tx_submitted); @@ -923,24 +932,11 @@ static void peak_usb_disconnect(struct usb_interface *intf) static int 
peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct usb_device *usb_dev = interface_to_usbdev(intf); - const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct); - const struct peak_usb_adapter *peak_usb_adapter = NULL; + const struct peak_usb_adapter *peak_usb_adapter; int i, err = -ENOMEM; /* get corresponding PCAN-USB adapter */ - for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) - if (peak_usb_adapters_list[i]->device_id == usb_id_product) { - peak_usb_adapter = peak_usb_adapters_list[i]; - break; - } - - if (!peak_usb_adapter) { - /* should never come except device_id bad usage in this file */ - pr_err("%s: didn't find device id. 0x%x in devices list\n", - PCAN_USB_DRIVER_NAME, usb_id_product); - return -ENODEV; - } + peak_usb_adapter = (const struct peak_usb_adapter *)id->driver_info; /* got corresponding adapter: check if it handles current interface */ if (peak_usb_adapter->intf_probe) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index 4b1528a42a7b..b00a4811bf61 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -31,7 +31,7 @@ /* usb adapters maximum channels per usb interface */ #define PCAN_USB_MAX_CHANNEL 2 -/* maximum length of the usb commands sent to/received from the devices */ +/* maximum length of the usb commands sent to/received from the devices */ #define PCAN_USB_MAX_CMD_LEN 32 struct peak_usb_device; @@ -46,6 +46,8 @@ struct peak_usb_adapter { const struct can_bittiming_const * const data_bittiming_const; unsigned int ctrl_count; + const struct ethtool_ops *ethtool_ops; + int (*intf_probe)(struct usb_interface *intf); int (*dev_init)(struct peak_usb_device *dev); @@ -71,7 +73,6 @@ struct peak_usb_adapter { u8 ep_msg_in; u8 ep_msg_out[PCAN_USB_MAX_CHANNEL]; u8 ts_used_bits; - u32 ts_period; u8 us_per_ts_shift; u32 us_per_ts_scale; @@ -112,8 +113,6 @@ struct peak_usb_device { unsigned int ctrl_idx; u32 state; - struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS]; - struct usb_device *udev; struct net_device *netdev; @@ -130,8 +129,6 @@ struct peak_usb_device { u8 ep_msg_in; u8 ep_msg_out; - u16 bus_load; - struct peak_usb_device *prev_siblings; struct peak_usb_device *next_siblings; }; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index bae078579c0d..b11eabad575b 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -7,6 +7,7 @@ #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/module.h> +#include <linux/ethtool.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -773,6 +774,10 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, tx_msg_flags |= PUCAN_MSG_RTR; } + /* Single-Shot frame */ + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + tx_msg_flags |= PUCAN_MSG_SINGLE_SHOT; + tx_msg->flags = cpu_to_le16(tx_msg_flags); tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc); memcpy(tx_msg->d, cfd->data, cfd->len); @@ -1006,6 +1011,31 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev) } } +/* blink LED's */ +static int pcan_usb_fd_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_FAST); + break; + case ETHTOOL_ID_INACTIVE: + err = 
pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); + break; + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_fd_ethtool_ops = { + .set_phys_id = pcan_usb_fd_set_phys_id, +}; + /* describes the PCAN-USB FD adapter */ static const struct can_bittiming_const pcan_usb_fd_const = { .name = "pcan_usb_fd", @@ -1037,7 +1067,7 @@ const struct peak_usb_adapter pcan_usb_fd = { .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_CC_LEN8_DLC, + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1047,9 +1077,10 @@ const struct peak_usb_adapter pcan_usb_fd = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1110,7 +1141,7 @@ const struct peak_usb_adapter pcan_usb_chip = { .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_CC_LEN8_DLC, + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1120,9 +1151,10 @@ const struct peak_usb_adapter pcan_usb_chip = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1183,7 +1215,7 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_CC_LEN8_DLC, + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1193,9 +1225,10 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1256,7 +1289,7 @@ const struct peak_usb_adapter pcan_usb_x6 = { .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_CC_LEN8_DLC, + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1266,9 +1299,10 @@ const struct peak_usb_adapter pcan_usb_x6 = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 18fa180ecc81..858ab22708fc 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -9,6 +9,7 @@ #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/module.h> +#include <linux/ethtool.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -36,6 +37,7 @@ #define PCAN_USBPRO_RTR 0x01 #define PCAN_USBPRO_EXT 0x02 +#define PCAN_USBPRO_SS 0x08 #define PCAN_USBPRO_CMD_BUFFER_SIZE 512 @@ -288,7 +290,7 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, pr->data_type); /* check if channel in response corresponds too */ - else if ((req_channel != 0xff) && \ + else if ((req_channel != 0xff) && (pr->bus_act.channel != req_channel)) netdev_err(dev->netdev, "got rsp %xh but on chan%u: ignored\n", @@ -437,8 +439,7 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev, return err; pdn = (struct pcan_usb_pro_devid *)pc; - if (device_id) - *device_id = le32_to_cpu(pdn->serial_num); + *device_id = le32_to_cpu(pdn->serial_num); return err; } @@ -776,9 +777,13 @@ static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev, flags = 0; if (cf->can_id & CAN_EFF_FLAG) - flags |= 0x02; + flags |= PCAN_USBPRO_EXT; if (cf->can_id & CAN_RTR_FLAG) - flags |= 0x01; + flags |= PCAN_USBPRO_RTR; + + /* Single-Shot frame */ + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + flags |= PCAN_USBPRO_SS; pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id, cf->data); @@ -906,7 +911,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) usb_if->dev[dev->ctrl_idx] = dev; /* set LED in default state (end of init phase) */ - pcan_usb_pro_set_led(dev, 0, 1); + pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); kfree(bi); kfree(fi); @@ -990,6 +995,35 @@ int pcan_usb_pro_probe(struct usb_interface *intf) return 0; } +static int pcan_usb_pro_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* fast blinking forever */ + err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_BLINK_FAST, + 0xffffffff); + break; + + case ETHTOOL_ID_INACTIVE: + /* restore LED default */ + err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); + break; + + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_pro_ethtool_ops = { + .set_phys_id = pcan_usb_pro_set_phys_id, +}; + /* * describe the PCAN-USB Pro adapter */ @@ -1009,7 +1043,8 @@ const struct peak_usb_adapter pcan_usb_pro = { .name = "PCAN-USB Pro", .device_id = PCAN_USBPRO_PRODUCT_ID, .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, - .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT, .clock = { .freq = PCAN_USBPRO_CRYSTAL_HZ, }, @@ -1018,9 +1053,10 @@ const struct peak_usb_adapter pcan_usb_pro = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), + .ethtool_ops = &pcan_usb_pro_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index 6bb12357d078..5d4cf14eb9d9 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h @@ -34,11 +34,11 @@ /* PCAN_USBPRO_INFO_BL vendor request record type */ struct __packed pcan_usb_pro_blinfo { __le32 ctrl_type; - u8 version[4]; - u8 day; - u8 month; - u8 year; - u8 dummy; + u8 version[4]; + u8 day; + u8 month; + u8 year; + u8 dummy; __le32 serial_num_hi; __le32 serial_num_lo; __le32 hw_type; @@ -48,11 +48,11 @@ struct __packed pcan_usb_pro_blinfo { /* PCAN_USBPRO_INFO_FW vendor request record type */ struct __packed pcan_usb_pro_fwinfo { __le32 ctrl_type; - u8 version[4]; - u8 day; - u8 month; - u8 year; - u8 dummy; + u8 version[4]; + u8 day; + u8 month; + u8 year; + u8 dummy; __le32 fw_type; }; @@ -78,59 +78,65 @@ struct __packed pcan_usb_pro_fwinfo { /* record structures */ struct __packed pcan_usb_pro_btr { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 dummy; __le32 CCBT; }; struct __packed pcan_usb_pro_busact { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 onoff; }; struct __packed pcan_usb_pro_silent { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 onoff; }; struct __packed pcan_usb_pro_filter { - u8 data_type; - u8 dummy; + u8 data_type; + u8 dummy; __le16 filter_mode; }; struct __packed pcan_usb_pro_setts { - u8 data_type; - u8 dummy; + u8 data_type; + u8 dummy; __le16 mode; }; struct __packed pcan_usb_pro_devid { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 dummy; __le32 serial_num; }; +#define PCAN_USBPRO_LED_DEVICE 0x00 +#define PCAN_USBPRO_LED_BLINK_FAST 0x01 +#define PCAN_USBPRO_LED_BLINK_SLOW 0x02 +#define PCAN_USBPRO_LED_ON 0x03 +#define PCAN_USBPRO_LED_OFF 0x04 + struct __packed pcan_usb_pro_setled { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 mode; __le32 timeout; }; struct __packed pcan_usb_pro_rxmsg { - u8 data_type; - u8 client; - u8 flags; - u8 len; + u8 data_type; + u8 client; + u8 flags; + u8 len; __le32 ts32; __le32 id; - u8 data[8]; + u8 data[8]; }; #define PCAN_USBPRO_STATUS_ERROR 0x0001 @@ -139,26 +145,26 @@ struct __packed pcan_usb_pro_rxmsg { #define PCAN_USBPRO_STATUS_QOVERRUN 0x0008 struct __packed pcan_usb_pro_rxstatus { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 status; __le32 ts32; __le32 err_frm; }; struct __packed pcan_usb_pro_rxts { - u8 data_type; - u8 dummy[3]; + u8 data_type; + u8 dummy[3]; __le32 ts64[2]; }; struct __packed pcan_usb_pro_txmsg { - u8 data_type; - u8 client; - u8 flags; - u8 len; + u8 data_type; + u8 client; + u8 flags; + u8 len; __le32 id; - u8 data[8]; + u8 data[8]; }; union pcan_usb_pro_rec { diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index fa403c080871..1679cbe45ded 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -246,7 +246,7 @@ struct ucan_message_in { */ struct ucan_tx_complete_entry_t can_tx_complete_msg[0]; } __aligned(0x4) msg; -} __packed; +} __packed __aligned(0x4); /* Macros to calculate message lengths */ #define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) @@ -675,7 +675,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, can_get_echo_skb(up->netdev, echo_index, NULL); } else { up->netdev->stats.tx_dropped++; - can_free_echo_skb(up->netdev, echo_index); + 
can_free_echo_skb(up->netdev, echo_index, NULL); } spin_unlock_irqrestore(&up->echo_skb_lock, flags); } @@ -843,7 +843,7 @@ static void ucan_write_bulk_callback(struct urb *urb) /* update counters an cleanup */ spin_lock_irqsave(&up->echo_skb_lock, flags); - can_free_echo_skb(up->netdev, context - up->context_array); + can_free_echo_skb(up->netdev, context - up->context_array, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); up->netdev->stats.tx_dropped++; @@ -1157,7 +1157,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, * frees the skb */ spin_lock_irqsave(&up->echo_skb_lock, flags); - can_free_echo_skb(up->netdev, echo_index); + can_free_echo_skb(up->netdev, echo_index, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); if (ret == -ENODEV) { diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index e8c42430a4fc..b6e7ef0d5bc6 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -691,7 +691,7 @@ nofreecontext: return NETDEV_TX_BUSY; failed: - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 37fa19c62d73..3b883e607d8b 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1772,17 +1772,15 @@ static int xcan_probe(struct platform_device *pdev) /* Getting the CAN can_clk info */ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); if (IS_ERR(priv->can_clk)) { - if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Device clock not found.\n"); - ret = PTR_ERR(priv->can_clk); + ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk), + "device clock not found\n"); goto err_free; } priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); if (IS_ERR(priv->bus_clk)) { - if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "bus clock not found\n"); - ret = PTR_ERR(priv->bus_clk); + ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk), + "bus clock not found\n"); goto err_free; } diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 3af373e90806..a5f1aa911fe2 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -1,12 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only menu "Distributed Switch Architecture drivers" - depends on HAVE_NET_DSA + depends on NET_DSA source "drivers/net/dsa/b53/Kconfig" config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" - depends on HAS_IOMEM && NET_DSA + depends on HAS_IOMEM select NET_DSA_TAG_BRCM select FIXED_PHY select BCM7XXX_PHY @@ -18,7 +18,6 @@ config NET_DSA_BCM_SF2 config NET_DSA_LOOP tristate "DSA mock-up Ethernet switch chip support" - depends on NET_DSA select FIXED_PHY help This enables support for a fake mock-up switch chip which @@ -28,7 +27,7 @@ source "drivers/net/dsa/hirschmann/Kconfig" config NET_DSA_LANTIQ_GSWIP tristate "Lantiq / Intel GSWIP" - depends on HAS_IOMEM && NET_DSA + depends on HAS_IOMEM select NET_DSA_TAG_GSWIP help This enables support for the Lantiq / Intel GSWIP 2.1 found in @@ -36,7 +35,6 @@ config NET_DSA_LANTIQ_GSWIP config NET_DSA_MT7530 tristate "MediaTek MT753x and MT7621 Ethernet switch support" - depends on NET_DSA select NET_DSA_TAG_MTK help This enables support for the MediaTek MT7530, MT7531, and MT7621 @@ -44,7 +42,6 @@ config NET_DSA_MT7530 config NET_DSA_MV88E6060 tristate "Marvell 88E6060 ethernet switch 
chip support" - depends on NET_DSA select NET_DSA_TAG_TRAILER help This enables support for the Marvell 88E6060 ethernet switch @@ -64,7 +61,6 @@ source "drivers/net/dsa/xrs700x/Kconfig" config NET_DSA_QCA8K tristate "Qualcomm Atheros QCA8K Ethernet switch family support" - depends on NET_DSA select NET_DSA_TAG_QCA select REGMAP help @@ -73,7 +69,6 @@ config NET_DSA_QCA8K config NET_DSA_REALTEK_SMI tristate "Realtek SMI Ethernet switch family support" - depends on NET_DSA select NET_DSA_TAG_RTL4_A select FIXED_PHY select IRQ_DOMAIN @@ -93,7 +88,7 @@ config NET_DSA_SMSC_LAN9303 config NET_DSA_SMSC_LAN9303_I2C tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode" - depends on NET_DSA && I2C + depends on I2C select NET_DSA_SMSC_LAN9303 select REGMAP_I2C help @@ -102,7 +97,6 @@ config NET_DSA_SMSC_LAN9303_I2C config NET_DSA_SMSC_LAN9303_MDIO tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode" - depends on NET_DSA select NET_DSA_SMSC_LAN9303 help Enable access functions if the SMSC/Microchip LAN9303 is configured @@ -110,7 +104,6 @@ config NET_DSA_SMSC_LAN9303_MDIO config NET_DSA_VITESSE_VSC73XX tristate - depends on NET_DSA select FIXED_PHY select VITESSE_PHY select GPIOLIB @@ -120,7 +113,6 @@ config NET_DSA_VITESSE_VSC73XX config NET_DSA_VITESSE_VSC73XX_SPI tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support" - depends on NET_DSA depends on SPI select NET_DSA_VITESSE_VSC73XX help @@ -129,7 +121,6 @@ config NET_DSA_VITESSE_VSC73XX_SPI config NET_DSA_VITESSE_VSC73XX_PLATFORM tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support" - depends on NET_DSA depends on HAS_IOMEM select NET_DSA_VITESSE_VSC73XX help diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig index f9891a81c808..90b525160b71 100644 --- a/drivers/net/dsa/b53/Kconfig +++ b/drivers/net/dsa/b53/Kconfig @@ -3,6 +3,7 @@ menuconfig B53 tristate "Broadcom BCM53xx managed switch support" depends on NET_DSA select NET_DSA_TAG_BRCM + select NET_DSA_TAG_BRCM_LEGACY select NET_DSA_TAG_BRCM_PREPEND help This driver adds support for Broadcom managed switch chips. 
It supports diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index eb443721c58e..3ca6b394dd5f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -349,7 +349,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); } -static void b53_enable_vlan(struct b53_device *dev, bool enable, +static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, bool enable_filtering) { u8 mgmt, vc0, vc1, vc4 = 0, vc5; @@ -431,6 +431,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable, b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); dev->vlan_enabled = enable; + + dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", + port, enable, enable_filtering); } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -743,7 +746,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering); + b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, @@ -1422,7 +1425,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, { struct b53_device *dev = ds->priv; - b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); + b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering); return 0; } @@ -1447,7 +1450,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid >= dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true, ds->vlan_filtering); + b53_enable_vlan(dev, port, true, ds->vlan_filtering); return 0; } @@ -2045,15 +2048,17 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; - /* Older models (5325, 5365) support a different tag format that we do - * not support in net/dsa/tag_brcm.c yet. - */ - if (is5325(dev) || is5365(dev) || - !b53_can_enable_brcm_tags(ds, port, mprot)) { + if (!b53_can_enable_brcm_tags(ds, port, mprot)) { dev->tag_protocol = DSA_TAG_PROTO_NONE; goto out; } + /* Older models require a different 6 byte tag */ + if (is5325(dev) || is5365(dev) || is63xx(dev)) { + dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; + goto out; + } + /* Broadcom BCM58xx chips have a flow accelerator on Port 8 * which requires us to use the prepended Broadcom tag type */ diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index c628d0980c0b..82680e083cc2 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -16,6 +16,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#include <linux/bits.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> @@ -228,11 +229,65 @@ static const struct b53_io_ops b53_mmap_ops = { .write64 = b53_mmap_write64, }; +static int b53_mmap_probe_of(struct platform_device *pdev, + struct b53_platform_data **ppdata) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *of_ports, *of_port; + struct device *dev = &pdev->dev; + struct b53_platform_data *pdata; + void __iomem *mem; + + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->regs = mem; + pdata->chip_id = BCM63XX_DEVICE_ID; + pdata->big_endian = of_property_read_bool(np, "big-endian"); + + of_ports = of_get_child_by_name(np, "ports"); + if (!of_ports) { + dev_err(dev, "no ports child node found\n"); + return -EINVAL; + } + + for_each_available_child_of_node(of_ports, of_port) { + u32 reg; + + if (of_property_read_u32(of_port, "reg", &reg)) + continue; + + if (reg < B53_CPU_PORT) + pdata->enabled_ports |= BIT(reg); + } + + of_node_put(of_ports); + *ppdata = pdata; + + return 0; +} + static int b53_mmap_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct b53_platform_data *pdata = pdev->dev.platform_data; struct b53_mmap_priv *priv; struct b53_device *dev; + int ret; + + if (!pdata && np) { + ret = b53_mmap_probe_of(pdev, &pdata); + if (ret) { + dev_err(&pdev->dev, "OF probe error\n"); + return ret; + } + } if (!pdata) return -EINVAL; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 8419bb7f4505..82700a5714c1 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -186,11 +186,7 @@ static inline int is531x5(struct b53_device *dev) static inline int is63xx(struct b53_device *dev) { -#ifdef CONFIG_BCM63XX return dev->chip_id == BCM63XX_DEVICE_ID; -#else - return 0; -#endif } static inline int is5301x(struct b53_device *dev) diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 7abec8dab8ba..ecb9f7f6b335 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -324,9 +324,23 @@ static int b53_spi_remove(struct spi_device *spi) return 0; } +static const struct of_device_id b53_spi_of_match[] = { + { .compatible = "brcm,bcm5325" }, + { .compatible = "brcm,bcm5365" }, + { .compatible = "brcm,bcm5395" }, + { .compatible = "brcm,bcm5397" }, + { .compatible = "brcm,bcm5398" }, + { .compatible = "brcm,bcm53115" }, + { .compatible = "brcm,bcm53125" }, + { .compatible = "brcm,bcm53128" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, b53_spi_of_match); + static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", + .of_match_table = b53_spi_of_match, }, .probe = b53_spi_probe, .remove = b53_spi_remove, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ba5d546d06aa..9150038b60cb 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -32,6 +32,36 @@ #include "b53/b53_priv.h" #include "b53/b53_regs.h" +static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port) +{ + switch (priv->type) { + case BCM4908_DEVICE_ID: + switch (port) { + case 7: + return REG_RGMII_11_CNTRL; + default: + break; + } + break; + default: + switch (port) { + case 0: + return REG_RGMII_0_CNTRL; + case 1: + return REG_RGMII_1_CNTRL; + case 2: + return REG_RGMII_2_CNTRL; + default: + break; + } + } + 
+ WARN_ONCE(1, "Unsupported port %d\n", port); + + /* RO fallback reg */ + return REG_SWITCH_STATUS; +} + /* Return the number of active ports, not counting the IMP (CPU) port */ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds) { @@ -435,6 +465,44 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) return 0; } +static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv) +{ + struct device *dev = priv->dev->ds->dev; + int shift; + u32 mask; + u32 reg; + int i; + + mask = BIT(priv->num_crossbar_int_ports) - 1; + + reg = reg_readl(priv, REG_CROSSBAR); + switch (priv->type) { + case BCM4908_DEVICE_ID: + shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports; + reg &= ~(mask << shift); + if (0) /* FIXME */ + reg |= CROSSBAR_BCM4908_EXT_SERDES << shift; + else if (priv->int_phy_mask & BIT(7)) + reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift; + else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode)) + reg |= CROSSBAR_BCM4908_EXT_RGMII << shift; + else if (WARN(1, "Invalid port mode\n")) + return; + break; + default: + return; + } + reg_writel(priv, reg, REG_CROSSBAR); + + reg = reg_readl(priv, REG_CROSSBAR); + for (i = 0; i < priv->num_crossbar_int_ports; i++) { + shift = i * priv->num_crossbar_int_ports; + + dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i, + (reg >> shift) & mask); + } +} + static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) { intrl2_0_mask_set(priv, 0xffffffff); @@ -446,10 +514,11 @@ static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, struct device_node *dn) { + struct device *dev = priv->dev->ds->dev; + struct bcm_sf2_port_status *port_st; struct device_node *port; unsigned int port_num; struct property *prop; - phy_interface_t mode; int err; priv->moca_port = -1; @@ -458,19 +527,26 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, if (of_property_read_u32(port, "reg", &port_num)) continue; + if (port_num >= DSA_MAX_PORTS) { + dev_err(dev, "Invalid port number %d\n", port_num); + continue; + } + + port_st = &priv->port_sts[port_num]; + /* Internal PHYs get assigned a specific 'phy-mode' property * value: "internal" to help flag them before MDIO probing * has completed, since they might be turned off at that * time */ - err = of_get_phy_mode(port, &mode); + err = of_get_phy_mode(port, &port_st->mode); if (err) continue; - if (mode == PHY_INTERFACE_MODE_INTERNAL) + if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL) priv->int_phy_mask |= 1 << port_num; - if (mode == PHY_INTERFACE_MODE_MOCA) + if (port_st->mode == PHY_INTERFACE_MODE_MOCA) priv->moca_port = port_num; if (of_property_read_bool(port, "brcm,use-bcm-hdr")) @@ -647,6 +723,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 id_mode_dis = 0, port_mode; + u32 reg_rgmii_ctrl; u32 reg; if (port == core_readl(priv, CORE_IMP0_PRT_ID)) @@ -670,10 +747,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, return; } + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + /* Clear id_mode_dis bit, and the existing port mode, let * RGMII_MODE_EN bet set by mac_link_{up,down} */ - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg = reg_readl(priv, reg_rgmii_ctrl); reg &= ~ID_MODE_DIS; reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); @@ -681,13 +760,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, if (id_mode_dis) reg |= ID_MODE_DIS; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); 
+ reg_writel(priv, reg, reg_rgmii_ctrl); } static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, phy_interface_t interface, bool link) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg_rgmii_ctrl; u32 reg; if (!phy_interface_mode_is_rgmii(interface) && @@ -695,13 +775,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, interface != PHY_INTERFACE_MODE_REVMII) return; + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + /* If the link is down, just disable the interface to conserve power */ - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg = reg_readl(priv, reg_rgmii_ctrl); if (link) reg |= RGMII_MODE_EN; else reg &= ~RGMII_MODE_EN; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + reg_writel(priv, reg, reg_rgmii_ctrl); } static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, @@ -735,11 +817,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->dev->ports[port].eee; - u32 reg, offset; bcm_sf2_sw_mac_link_set(ds, port, interface, true); if (port != core_readl(priv, CORE_IMP0_PRT_ID)) { + u32 reg_rgmii_ctrl; + u32 reg, offset; + + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + if (priv->type == BCM4908_DEVICE_ID || priv->type == BCM7445_DEVICE_ID) offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); @@ -750,7 +836,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, interface == PHY_INTERFACE_MODE_RGMII_TXID || interface == PHY_INTERFACE_MODE_MII || interface == PHY_INTERFACE_MODE_REVMII) { - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg = reg_readl(priv, reg_rgmii_ctrl); reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); if (tx_pause) @@ -758,7 +844,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, if (rx_pause) reg |= RX_PAUSE_EN; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + reg_writel(priv, reg, reg_rgmii_ctrl); } reg = SW_OVERRIDE | LINK_STS; @@ -861,6 +947,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) return ret; } + bcm_sf2_crossbar_setup(priv); + ret = bcm_sf2_cfp_resume(ds); if (ret) return ret; @@ -1133,6 +1221,7 @@ struct bcm_sf2_of_data { const u16 *reg_offsets; unsigned int core_reg_align; unsigned int num_cfp_rules; + unsigned int num_crossbar_int_ports; }; static const u16 bcm_sf2_4908_reg_offsets[] = { @@ -1144,9 +1233,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = { [REG_PHY_REVISION] = 0x14, [REG_SPHY_CNTRL] = 0x24, [REG_CROSSBAR] = 0xc8, - [REG_RGMII_0_CNTRL] = 0xe0, - [REG_RGMII_1_CNTRL] = 0xec, - [REG_RGMII_2_CNTRL] = 0xf8, + [REG_RGMII_11_CNTRL] = 0x014c, [REG_LED_0_CNTRL] = 0x40, [REG_LED_1_CNTRL] = 0x4c, [REG_LED_2_CNTRL] = 0x58, @@ -1156,7 +1243,8 @@ static const struct bcm_sf2_of_data bcm_sf2_4908_data = { .type = BCM4908_DEVICE_ID, .core_reg_align = 0, .reg_offsets = bcm_sf2_4908_reg_offsets, - .num_cfp_rules = 0, /* FIXME */ + .num_cfp_rules = 256, + .num_crossbar_int_ports = 2, }; /* Register offsets for the SWITCH_REG_* block */ @@ -1267,6 +1355,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->reg_offsets = data->reg_offsets; priv->core_reg_align = data->core_reg_align; priv->num_cfp_rules = data->num_cfp_rules; + priv->num_crossbar_int_ports = data->num_crossbar_int_ports; priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev, "switch"); @@ -1340,6 +1429,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) goto out_clk_mdiv; } + bcm_sf2_crossbar_setup(priv); + 
bcm_sf2_gphy_enable_set(priv->dev->ds, true); ret = bcm_sf2_mdio_register(ds); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 1ed901a68536..0d48402068d3 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -44,6 +44,7 @@ struct bcm_sf2_hw_params { #define BCM_SF2_REGS_NUM 6 struct bcm_sf2_port_status { + phy_interface_t mode; unsigned int link; bool enabled; }; @@ -73,6 +74,7 @@ struct bcm_sf2_priv { const u16 *reg_offsets; unsigned int core_reg_align; unsigned int num_cfp_rules; + unsigned int num_crossbar_int_ports; /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 1d2d55c9f8aa..7bffc80f241f 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs { REG_RGMII_0_CNTRL, REG_RGMII_1_CNTRL, REG_RGMII_2_CNTRL, + REG_RGMII_11_CNTRL, REG_LED_0_CNTRL, REG_LED_1_CNTRL, REG_LED_2_CNTRL, @@ -48,7 +49,12 @@ enum bcm_sf2_reg_offs { #define PHY_PHYAD_SHIFT 8 #define PHY_PHYAD_MASK 0x1F -#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x)) +/* Relative to REG_CROSSBAR */ +#define CROSSBAR_BCM4908_INT_P7 0 +#define CROSSBAR_BCM4908_INT_RUNNER 1 +#define CROSSBAR_BCM4908_EXT_SERDES 0 +#define CROSSBAR_BCM4908_EXT_GPHY4 1 +#define CROSSBAR_BCM4908_EXT_RGMII 2 /* Relative to REG_RGMII_CNTRL */ #define RGMII_MODE_EN (1 << 0) diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 463137c39db2..4d78219da253 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -433,7 +433,7 @@ static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port, mutex_lock(&hellcreek->reg_lock); - hellcreek_select_vlan(hellcreek, vid, 0); + hellcreek_select_vlan(hellcreek, vid, false); /* Setup port vlan membership */ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask); @@ -596,6 +596,83 @@ static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port, hellcreek_unapply_vlan(hellcreek, upstream, vid); } +static void hellcreek_port_set_ucast_flood(struct hellcreek *hellcreek, + int port, bool enable) +{ + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "%s unicast flooding on port %d\n", + enable ? "Enable" : "Disable", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + if (enable) + val &= ~HR_PTCFG_UUC_FLT; + else + val |= HR_PTCFG_UUC_FLT; + hellcreek_write(hellcreek, val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_port_set_mcast_flood(struct hellcreek *hellcreek, + int port, bool enable) +{ + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "%s multicast flooding on port %d\n", + enable ? 
"Enable" : "Disable", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + if (enable) + val &= ~HR_PTCFG_UMC_FLT; + else + val |= HR_PTCFG_UMC_FLT; + hellcreek_write(hellcreek, val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static int hellcreek_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int hellcreek_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct hellcreek *hellcreek = ds->priv; + + if (flags.mask & BR_FLOOD) + hellcreek_port_set_ucast_flood(hellcreek, port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_MCAST_FLOOD) + hellcreek_port_set_mcast_flood(hellcreek, port, + !!(flags.val & BR_MCAST_FLOOD)); + + return 0; +} + static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { @@ -670,6 +747,40 @@ static int __hellcreek_fdb_del(struct hellcreek *hellcreek, return hellcreek_wait_fdb_ready(hellcreek); } +static void hellcreek_populate_fdb_entry(struct hellcreek *hellcreek, + struct hellcreek_fdb_entry *entry, + size_t idx) +{ + unsigned char addr[ETH_ALEN]; + u16 meta, mac; + + /* Read values */ + meta = hellcreek_read(hellcreek, HR_FDBMDRD); + mac = hellcreek_read(hellcreek, HR_FDBRDL); + addr[5] = mac & 0xff; + addr[4] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDM); + addr[3] = mac & 0xff; + addr[2] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDH); + addr[1] = mac & 0xff; + addr[0] = (mac & 0xff00) >> 8; + + /* Populate @entry */ + memcpy(entry->mac, addr, sizeof(addr)); + entry->idx = idx; + entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> + HR_FDBMDRD_PORTMASK_SHIFT; + entry->age = (meta & HR_FDBMDRD_AGE_MASK) >> + HR_FDBMDRD_AGE_SHIFT; + entry->is_obt = !!(meta & HR_FDBMDRD_OBT); + entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED); + entry->is_static = !!(meta & HR_FDBMDRD_STATIC); + entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >> + HR_FDBMDRD_REPRIO_TC_SHIFT; + entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN); +} + /* Retrieve the index of a FDB entry by mac address. Currently we search through * the complete table in hardware. If that's too slow, we might have to cache * the complete FDB table in software. @@ -691,39 +802,19 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek, * enter new entries anywhere. 
*/ for (i = 0; i < hellcreek->fdb_entries; ++i) { - unsigned char addr[ETH_ALEN]; - u16 meta, mac; - - meta = hellcreek_read(hellcreek, HR_FDBMDRD); - mac = hellcreek_read(hellcreek, HR_FDBRDL); - addr[5] = mac & 0xff; - addr[4] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDM); - addr[3] = mac & 0xff; - addr[2] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDH); - addr[1] = mac & 0xff; - addr[0] = (mac & 0xff00) >> 8; + struct hellcreek_fdb_entry tmp = { 0 }; + + /* Read entry */ + hellcreek_populate_fdb_entry(hellcreek, &tmp, i); /* Force next entry */ hellcreek_write(hellcreek, 0x00, HR_FDBRDH); - if (memcmp(addr, dest, ETH_ALEN)) + if (memcmp(tmp.mac, dest, ETH_ALEN)) continue; /* Match found */ - entry->idx = i; - entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> - HR_FDBMDRD_PORTMASK_SHIFT; - entry->age = (meta & HR_FDBMDRD_AGE_MASK) >> - HR_FDBMDRD_AGE_SHIFT; - entry->is_obt = !!(meta & HR_FDBMDRD_OBT); - entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED); - entry->is_static = !!(meta & HR_FDBMDRD_STATIC); - entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >> - HR_FDBMDRD_REPRIO_TC_SHIFT; - entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN); - memcpy(entry->mac, addr, sizeof(addr)); + memcpy(entry, &tmp, sizeof(*entry)); return 0; } @@ -838,18 +929,9 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, for (i = 0; i < hellcreek->fdb_entries; ++i) { unsigned char null_addr[ETH_ALEN] = { 0 }; struct hellcreek_fdb_entry entry = { 0 }; - u16 meta, mac; - - meta = hellcreek_read(hellcreek, HR_FDBMDRD); - mac = hellcreek_read(hellcreek, HR_FDBRDL); - entry.mac[5] = mac & 0xff; - entry.mac[4] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDM); - entry.mac[3] = mac & 0xff; - entry.mac[2] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDH); - entry.mac[1] = mac & 0xff; - entry.mac[0] = (mac & 0xff00) >> 8; + + /* Read entry */ + hellcreek_populate_fdb_entry(hellcreek, &entry, i); /* Force next entry */ hellcreek_write(hellcreek, 0x00, HR_FDBRDH); @@ -858,10 +940,6 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, if (!memcmp(entry.mac, null_addr, ETH_ALEN)) continue; - entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> - HR_FDBMDRD_PORTMASK_SHIFT; - entry.is_static = !!(meta & HR_FDBMDRD_STATIC); - /* Check port mask */ if (!(entry.portmask & BIT(port))) continue; @@ -1004,6 +1082,22 @@ out: return ret; } +static int hellcreek_devlink_info_get(struct dsa_switch *ds, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct hellcreek *hellcreek = ds->priv; + int ret; + + ret = devlink_info_driver_name_put(req, "hellcreek"); + if (ret) + return ret; + + return devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, + hellcreek->pdata->name); +} + static u64 hellcreek_devlink_vlan_table_get(void *priv) { struct hellcreek *hellcreek = priv; @@ -1082,6 +1176,129 @@ out: return err; } +static int hellcreek_devlink_region_vlan_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct hellcreek_devlink_vlan_entry *table, *entry; + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct hellcreek *hellcreek = ds->priv; + int i; + + table = kcalloc(VLAN_N_VID, sizeof(*entry), GFP_KERNEL); + if (!table) + return -ENOMEM; + + entry = table; + + mutex_lock(&hellcreek->reg_lock); + for (i = 0; i < VLAN_N_VID; ++i, ++entry) { + entry->member = hellcreek->vidmbrcfg[i]; + entry->vid = 
i; + } + mutex_unlock(&hellcreek->reg_lock); + + *data = (u8 *)table; + + return 0; +} + +static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct hellcreek_fdb_entry *table, *entry; + struct hellcreek *hellcreek = ds->priv; + size_t i; + + table = kcalloc(hellcreek->fdb_entries, sizeof(*entry), GFP_KERNEL); + if (!table) + return -ENOMEM; + + entry = table; + + mutex_lock(&hellcreek->reg_lock); + + /* Start table read */ + hellcreek_read(hellcreek, HR_FDBMAX); + hellcreek_write(hellcreek, 0x00, HR_FDBMAX); + + for (i = 0; i < hellcreek->fdb_entries; ++i, ++entry) { + /* Read current entry */ + hellcreek_populate_fdb_entry(hellcreek, entry, i); + + /* Advance read pointer */ + hellcreek_write(hellcreek, 0x00, HR_FDBRDH); + } + + mutex_unlock(&hellcreek->reg_lock); + + *data = (u8 *)table; + + return 0; +} + +static struct devlink_region_ops hellcreek_region_vlan_ops = { + .name = "vlan", + .snapshot = hellcreek_devlink_region_vlan_snapshot, + .destructor = kfree, +}; + +static struct devlink_region_ops hellcreek_region_fdb_ops = { + .name = "fdb", + .snapshot = hellcreek_devlink_region_fdb_snapshot, + .destructor = kfree, +}; + +static int hellcreek_setup_devlink_regions(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + struct devlink_region_ops *ops; + struct devlink_region *region; + u64 size; + int ret; + + /* VLAN table */ + size = VLAN_N_VID * sizeof(struct hellcreek_devlink_vlan_entry); + ops = &hellcreek_region_vlan_ops; + + region = dsa_devlink_region_create(ds, ops, 1, size); + if (IS_ERR(region)) + return PTR_ERR(region); + + hellcreek->vlan_region = region; + + /* FDB table */ + size = hellcreek->fdb_entries * sizeof(struct hellcreek_fdb_entry); + ops = &hellcreek_region_fdb_ops; + + region = dsa_devlink_region_create(ds, ops, 1, size); + if (IS_ERR(region)) { + ret = PTR_ERR(region); + goto err_fdb; + } + + hellcreek->fdb_region = region; + + return 0; + +err_fdb: + dsa_devlink_region_destroy(hellcreek->vlan_region); + + return ret; +} + +static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + + dsa_devlink_region_destroy(hellcreek->fdb_region); + dsa_devlink_region_destroy(hellcreek->vlan_region); +} + static int hellcreek_setup(struct dsa_switch *ds) { struct hellcreek *hellcreek = ds->priv; @@ -1143,11 +1360,24 @@ static int hellcreek_setup(struct dsa_switch *ds) return ret; } + ret = hellcreek_setup_devlink_regions(ds); + if (ret) { + dev_err(hellcreek->dev, + "Failed to setup devlink regions!\n"); + goto err_regions; + } + return 0; + +err_regions: + dsa_devlink_resources_unregister(ds); + + return ret; } static void hellcreek_teardown(struct dsa_switch *ds) { + hellcreek_teardown_devlink_regions(ds); dsa_devlink_resources_unregister(ds); } @@ -1518,31 +1748,34 @@ static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port, } static const struct dsa_switch_ops hellcreek_ds_ops = { - .get_ethtool_stats = hellcreek_get_ethtool_stats, - .get_sset_count = hellcreek_get_sset_count, - .get_strings = hellcreek_get_strings, - .get_tag_protocol = hellcreek_get_tag_protocol, - .get_ts_info = hellcreek_get_ts_info, - .phylink_validate = hellcreek_phylink_validate, - .port_bridge_join = hellcreek_port_bridge_join, - .port_bridge_leave = hellcreek_port_bridge_leave, - .port_disable = hellcreek_port_disable, - .port_enable = 
hellcreek_port_enable, - .port_fdb_add = hellcreek_fdb_add, - .port_fdb_del = hellcreek_fdb_del, - .port_fdb_dump = hellcreek_fdb_dump, - .port_hwtstamp_set = hellcreek_port_hwtstamp_set, - .port_hwtstamp_get = hellcreek_port_hwtstamp_get, - .port_prechangeupper = hellcreek_port_prechangeupper, - .port_rxtstamp = hellcreek_port_rxtstamp, - .port_setup_tc = hellcreek_port_setup_tc, - .port_stp_state_set = hellcreek_port_stp_state_set, - .port_txtstamp = hellcreek_port_txtstamp, - .port_vlan_add = hellcreek_vlan_add, - .port_vlan_del = hellcreek_vlan_del, - .port_vlan_filtering = hellcreek_vlan_filtering, - .setup = hellcreek_setup, - .teardown = hellcreek_teardown, + .devlink_info_get = hellcreek_devlink_info_get, + .get_ethtool_stats = hellcreek_get_ethtool_stats, + .get_sset_count = hellcreek_get_sset_count, + .get_strings = hellcreek_get_strings, + .get_tag_protocol = hellcreek_get_tag_protocol, + .get_ts_info = hellcreek_get_ts_info, + .phylink_validate = hellcreek_phylink_validate, + .port_bridge_flags = hellcreek_bridge_flags, + .port_bridge_join = hellcreek_port_bridge_join, + .port_bridge_leave = hellcreek_port_bridge_leave, + .port_disable = hellcreek_port_disable, + .port_enable = hellcreek_port_enable, + .port_fdb_add = hellcreek_fdb_add, + .port_fdb_del = hellcreek_fdb_del, + .port_fdb_dump = hellcreek_fdb_dump, + .port_hwtstamp_set = hellcreek_port_hwtstamp_set, + .port_hwtstamp_get = hellcreek_port_hwtstamp_get, + .port_pre_bridge_flags = hellcreek_pre_bridge_flags, + .port_prechangeupper = hellcreek_port_prechangeupper, + .port_rxtstamp = hellcreek_port_rxtstamp, + .port_setup_tc = hellcreek_port_setup_tc, + .port_stp_state_set = hellcreek_port_stp_state_set, + .port_txtstamp = hellcreek_port_txtstamp, + .port_vlan_add = hellcreek_vlan_add, + .port_vlan_del = hellcreek_vlan_del, + .port_vlan_filtering = hellcreek_vlan_filtering, + .setup = hellcreek_setup, + .teardown = hellcreek_teardown, }; static int hellcreek_probe(struct platform_device *pdev) @@ -1609,10 +1842,8 @@ static int hellcreek_probe(struct platform_device *pdev) } hellcreek->base = devm_ioremap_resource(dev, res); - if (IS_ERR(hellcreek->base)) { - dev_err(dev, "No memory available!\n"); + if (IS_ERR(hellcreek->base)) return PTR_ERR(hellcreek->base); - } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp"); if (!res) { @@ -1621,10 +1852,8 @@ static int hellcreek_probe(struct platform_device *pdev) } hellcreek->ptp_base = devm_ioremap_resource(dev, res); - if (IS_ERR(hellcreek->ptp_base)) { - dev_err(dev, "No memory available!\n"); + if (IS_ERR(hellcreek->ptp_base)) return PTR_ERR(hellcreek->ptp_base); - } ret = hellcreek_detect(hellcreek); if (ret) { @@ -1693,6 +1922,7 @@ static int hellcreek_remove(struct platform_device *pdev) } static const struct hellcreek_platform_data de1soc_r1_pdata = { + .name = "r4c30", .num_ports = 4, .is_100_mbits = 1, .qbv_support = 1, diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h index 305e76dab34d..9e303b8ab13c 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h @@ -278,6 +278,8 @@ struct hellcreek { struct mutex reg_lock; /* Switch IP register lock */ struct mutex vlan_lock; /* VLAN bitmaps lock */ struct mutex ptp_lock; /* PTP IP register lock */ + struct devlink_region *vlan_region; + struct devlink_region *fdb_region; void __iomem *base; void __iomem *ptp_base; u16 swcfg; /* swcfg shadow */ @@ -304,4 +306,9 @@ enum hellcreek_devlink_resource_id { 
HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE, }; +struct hellcreek_devlink_vlan_entry { + u16 vid; + u16 member; +}; + #endif /* _HELLCREEK_H_ */ diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c index 69dd9a2e8bb6..40b41c794dfa 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -373,30 +373,38 @@ long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp) return restart ? 1 : -1; } -bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *clone, unsigned int type) +void hellcreek_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb) { struct hellcreek *hellcreek = ds->priv; struct hellcreek_port_hwtstamp *ps; struct ptp_header *hdr; + struct sk_buff *clone; + unsigned int type; ps = &hellcreek->ports[port].port_hwtstamp; - /* Check if the driver is expected to do HW timestamping */ - if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP)) - return false; + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return; /* Make sure the message is a PTP message that needs to be timestamped * and the interaction with the HW timestamping is enabled. If not, stop * here */ - hdr = hellcreek_should_tstamp(hellcreek, port, clone, type); + hdr = hellcreek_should_tstamp(hellcreek, port, skb, type); if (!hdr) - return false; + return; + + clone = skb_clone_sk(skb); + if (!clone) + return; if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, - &ps->state)) - return false; + &ps->state)) { + kfree_skb(clone); + return; + } ps->tx_skb = clone; @@ -406,8 +414,6 @@ bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port, ps->tx_tstamp_start = jiffies; ptp_schedule_worker(hellcreek->ptp_clock, 0); - - return true; } bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h index c0745ffa1ebb..71af77efb28b 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h @@ -44,8 +44,8 @@ int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port, bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *clone, unsigned int type); -bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *clone, unsigned int type); +void hellcreek_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb); int hellcreek_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index bf5c62e5c0b0..314ae78bbdd6 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Lantiq / Intel GSWIP switch driver for VRX200 SoCs + * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs * * Copyright (C) 2010 Lantiq Deutschland * Copyright (C) 2012 John Crispin <john@phrozen.org> @@ -104,6 +104,7 @@ #define GSWIP_MII_CFG_MODE_RMIIP 0x2 #define GSWIP_MII_CFG_MODE_RMIIM 0x3 #define GSWIP_MII_CFG_MODE_RGMII 0x4 +#define GSWIP_MII_CFG_MODE_GMII 0x9 #define GSWIP_MII_CFG_MODE_MASK 0xf #define GSWIP_MII_CFG_RATE_M2P5 0x00 #define GSWIP_MII_CFG_RATE_M25 0x10 @@ -241,6 +242,7 @@ struct gswip_hw_info { int max_ports; int cpu_port; + const struct dsa_switch_ops *ops; }; struct xway_gphy_match_data { @@ -1412,12 +1414,42 @@ static int gswip_port_fdb_dump(struct 
dsa_switch *ds, int port, return 0; } -static void gswip_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void gswip_phylink_set_capab(unsigned long *supported, + struct phylink_link_state *state) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII, Reverse MII and Reduced MII, we + * support Gigabit, including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_RMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ switch (port) { case 0: case 1: @@ -1444,38 +1476,54 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, return; } - /* Allow all the expected bits */ - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); + gswip_phylink_set_capab(supported, state); - /* With the exclusion of MII, Reverse MII and Reduced MII, we - * support Gigabit, including Half duplex - */ - if (state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII && - state->interface != PHY_INTERFACE_MODE_RMII) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseT_Half); + return; + +unsupported: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", + phy_modes(state->interface), port); +} + +static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + switch (port) { + case 0: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_RMII) + goto unsupported; + break; + case 1: + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + case 5: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_RMII) + goto unsupported; + break; + default: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported port: %i\n", port); + return; } - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); + gswip_phylink_set_capab(supported, state); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); return; unsupported: bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", phy_modes(state->interface), port); - return; } static void gswip_port_set_link(struct gswip_priv 
*priv, int port, bool link) @@ -1613,6 +1661,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, case PHY_INTERFACE_MODE_RGMII_TXID: miicfg |= GSWIP_MII_CFG_MODE_RGMII; break; + case PHY_INTERFACE_MODE_GMII: + miicfg |= GSWIP_MII_CFG_MODE_GMII; + break; default: dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); @@ -1739,7 +1790,7 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(gswip_rmon_cnt); } -static const struct dsa_switch_ops gswip_switch_ops = { +static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .get_tag_protocol = gswip_get_tag_protocol, .setup = gswip_setup, .port_enable = gswip_port_enable, @@ -1754,7 +1805,31 @@ static const struct dsa_switch_ops gswip_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, - .phylink_validate = gswip_phylink_validate, + .phylink_validate = gswip_xrx200_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, + .phylink_mac_link_up = gswip_phylink_mac_link_up, + .get_strings = gswip_get_strings, + .get_ethtool_stats = gswip_get_ethtool_stats, + .get_sset_count = gswip_get_sset_count, +}; + +static const struct dsa_switch_ops gswip_xrx300_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_enable = gswip_port_enable, + .port_disable = gswip_port_disable, + .port_bridge_join = gswip_port_bridge_join, + .port_bridge_leave = gswip_port_bridge_leave, + .port_fast_age = gswip_port_fast_age, + .port_vlan_filtering = gswip_port_vlan_filtering, + .port_vlan_add = gswip_port_vlan_add, + .port_vlan_del = gswip_port_vlan_del, + .port_stp_state_set = gswip_port_stp_state_set, + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, + .phylink_validate = gswip_xrx300_phylink_validate, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, .phylink_mac_link_up = gswip_phylink_mac_link_up, @@ -1983,7 +2058,7 @@ remove_gphy: static int gswip_probe(struct platform_device *pdev) { struct gswip_priv *priv; - struct device_node *mdio_np, *gphy_fw_np; + struct device_node *np, *mdio_np, *gphy_fw_np; struct device *dev = &pdev->dev; int err; int i; @@ -2016,10 +2091,28 @@ static int gswip_probe(struct platform_device *pdev) priv->ds->dev = dev; priv->ds->num_ports = priv->hw_info->max_ports; priv->ds->priv = priv; - priv->ds->ops = &gswip_switch_ops; + priv->ds->ops = priv->hw_info->ops; priv->dev = dev; version = gswip_switch_r(priv, GSWIP_VERSION); + np = dev->of_node; + switch (version) { + case GSWIP_VERSION_2_0: + case GSWIP_VERSION_2_1: + if (!of_device_is_compatible(np, "lantiq,xrx200-gswip")) + return -EINVAL; + break; + case GSWIP_VERSION_2_2: + case GSWIP_VERSION_2_2_ETC: + if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") && + !of_device_is_compatible(np, "lantiq,xrx330-gswip")) + return -EINVAL; + break; + default: + dev_err(dev, "unknown GSWIP version: 0x%x", version); + return -ENOENT; + } + /* bring up the mdio bus */ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw"); if (gphy_fw_np) { @@ -2097,10 +2190,19 @@ static int gswip_remove(struct platform_device *pdev) static const struct gswip_hw_info gswip_xrx200 = { .max_ports = 7, .cpu_port = 6, + .ops = &gswip_xrx200_switch_ops, +}; + +static const struct gswip_hw_info gswip_xrx300 = { + .max_ports = 7, + 
.cpu_port = 6, + .ops = &gswip_xrx300_switch_ops, }; static const struct of_device_id gswip_of_match[] = { { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 }, + { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 }, + { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 }, {}, }; MODULE_DEVICE_TABLE(of, gswip_of_match); diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index 4ec6a47b7f72..c9e2a8989556 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -29,7 +29,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ8795 depends on NET_DSA select NET_DSA_MICROCHIP_KSZ_COMMON help - This driver adds support for Microchip KSZ8795 switch chips. + This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips. config NET_DSA_MICROCHIP_KSZ8795_SPI tristate "KSZ8795 series SPI connected switch driver" @@ -40,3 +40,11 @@ config NET_DSA_MICROCHIP_KSZ8795_SPI It is required to use the KSZ8795 switch driver as the only access is through SPI. + +config NET_DSA_MICROCHIP_KSZ8863_SMI + tristate "KSZ series SMI connected switch driver" + depends on NET_DSA_MICROCHIP_KSZ8795 + select MDIO_BITBANG + help + Select to enable support for registering switches configured through + Microchip SMI. It supports the KSZ8863 and KSZ8873 switch. diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 929caa81e782..2a03b21a3386 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h new file mode 100644 index 000000000000..9d611895d3cf --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Microchip KSZ8XXX series register access + * + * Copyright (C) 2020 Pengutronix, Michael Grzeschik <kernel@pengutronix.de> + */ + +#ifndef __KSZ8XXX_H +#define __KSZ8XXX_H +#include <linux/kernel.h> + +enum ksz_regs { + REG_IND_CTRL_0, + REG_IND_DATA_8, + REG_IND_DATA_CHECK, + REG_IND_DATA_HI, + REG_IND_DATA_LO, + REG_IND_MIB_CHECK, + P_FORCE_CTRL, + P_LINK_STATUS, + P_LOCAL_CTRL, + P_NEG_RESTART_CTRL, + P_REMOTE_STATUS, + P_SPEED_STATUS, + S_TAIL_TAG_CTRL, +}; + +enum ksz_masks { + PORT_802_1P_REMAPPING, + SW_TAIL_TAG_ENABLE, + MIB_COUNTER_OVERFLOW, + MIB_COUNTER_VALID, + VLAN_TABLE_FID, + VLAN_TABLE_MEMBERSHIP, + VLAN_TABLE_VALID, + STATIC_MAC_TABLE_VALID, + STATIC_MAC_TABLE_USE_FID, + STATIC_MAC_TABLE_FID, + STATIC_MAC_TABLE_OVERRIDE, + STATIC_MAC_TABLE_FWD_PORTS, + DYNAMIC_MAC_TABLE_ENTRIES_H, + DYNAMIC_MAC_TABLE_MAC_EMPTY, + DYNAMIC_MAC_TABLE_NOT_READY, + DYNAMIC_MAC_TABLE_ENTRIES, + DYNAMIC_MAC_TABLE_FID, + DYNAMIC_MAC_TABLE_SRC_PORT, + DYNAMIC_MAC_TABLE_TIMESTAMP, +}; + +enum ksz_shifts { + VLAN_TABLE_MEMBERSHIP_S, + VLAN_TABLE, + STATIC_MAC_FWD_PORTS, + STATIC_MAC_FID, + DYNAMIC_MAC_ENTRIES_H, + DYNAMIC_MAC_ENTRIES, + DYNAMIC_MAC_FID, + DYNAMIC_MAC_TIMESTAMP, + DYNAMIC_MAC_SRC_PORT, +}; + +struct ksz8 { + const u8 *regs; + const u32 *masks; + const u8 *shifts; + void *priv; +}; + +#endif diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index b4b7de63ca79..ad509a57a945 100644 --- 
a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -20,10 +20,112 @@ #include "ksz_common.h" #include "ksz8795_reg.h" +#include "ksz8.h" + +static const u8 ksz8795_regs[] = { + [REG_IND_CTRL_0] = 0x6E, + [REG_IND_DATA_8] = 0x70, + [REG_IND_DATA_CHECK] = 0x72, + [REG_IND_DATA_HI] = 0x71, + [REG_IND_DATA_LO] = 0x75, + [REG_IND_MIB_CHECK] = 0x74, + [P_FORCE_CTRL] = 0x0C, + [P_LINK_STATUS] = 0x0E, + [P_LOCAL_CTRL] = 0x07, + [P_NEG_RESTART_CTRL] = 0x0D, + [P_REMOTE_STATUS] = 0x08, + [P_SPEED_STATUS] = 0x09, + [S_TAIL_TAG_CTRL] = 0x0C, +}; + +static const u32 ksz8795_masks[] = { + [PORT_802_1P_REMAPPING] = BIT(7), + [SW_TAIL_TAG_ENABLE] = BIT(1), + [MIB_COUNTER_OVERFLOW] = BIT(6), + [MIB_COUNTER_VALID] = BIT(5), + [VLAN_TABLE_FID] = GENMASK(6, 0), + [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7), + [VLAN_TABLE_VALID] = BIT(12), + [STATIC_MAC_TABLE_VALID] = BIT(21), + [STATIC_MAC_TABLE_USE_FID] = BIT(23), + [STATIC_MAC_TABLE_FID] = GENMASK(30, 24), + [STATIC_MAC_TABLE_OVERRIDE] = BIT(26), + [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20), + [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0), + [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8), + [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7), + [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29), + [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20), + [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24), + [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27), +}; + +static const u8 ksz8795_shifts[] = { + [VLAN_TABLE_MEMBERSHIP_S] = 7, + [VLAN_TABLE] = 16, + [STATIC_MAC_FWD_PORTS] = 16, + [STATIC_MAC_FID] = 24, + [DYNAMIC_MAC_ENTRIES_H] = 3, + [DYNAMIC_MAC_ENTRIES] = 29, + [DYNAMIC_MAC_FID] = 16, + [DYNAMIC_MAC_TIMESTAMP] = 27, + [DYNAMIC_MAC_SRC_PORT] = 24, +}; + +static const u8 ksz8863_regs[] = { + [REG_IND_CTRL_0] = 0x79, + [REG_IND_DATA_8] = 0x7B, + [REG_IND_DATA_CHECK] = 0x7B, + [REG_IND_DATA_HI] = 0x7C, + [REG_IND_DATA_LO] = 0x80, + [REG_IND_MIB_CHECK] = 0x80, + [P_FORCE_CTRL] = 0x0C, + [P_LINK_STATUS] = 0x0E, + [P_LOCAL_CTRL] = 0x0C, + [P_NEG_RESTART_CTRL] = 0x0D, + [P_REMOTE_STATUS] = 0x0E, + [P_SPEED_STATUS] = 0x0F, + [S_TAIL_TAG_CTRL] = 0x03, +}; + +static const u32 ksz8863_masks[] = { + [PORT_802_1P_REMAPPING] = BIT(3), + [SW_TAIL_TAG_ENABLE] = BIT(6), + [MIB_COUNTER_OVERFLOW] = BIT(7), + [MIB_COUNTER_VALID] = BIT(6), + [VLAN_TABLE_FID] = GENMASK(15, 12), + [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16), + [VLAN_TABLE_VALID] = BIT(19), + [STATIC_MAC_TABLE_VALID] = BIT(19), + [STATIC_MAC_TABLE_USE_FID] = BIT(21), + [STATIC_MAC_TABLE_FID] = GENMASK(29, 26), + [STATIC_MAC_TABLE_OVERRIDE] = BIT(20), + [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16), + [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0), + [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7), + [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7), + [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28), + [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16), + [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20), + [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22), +}; -static const struct { +static u8 ksz8863_shifts[] = { + [VLAN_TABLE_MEMBERSHIP_S] = 16, + [STATIC_MAC_FWD_PORTS] = 16, + [STATIC_MAC_FID] = 22, + [DYNAMIC_MAC_ENTRIES_H] = 3, + [DYNAMIC_MAC_ENTRIES] = 24, + [DYNAMIC_MAC_FID] = 16, + [DYNAMIC_MAC_TIMESTAMP] = 24, + [DYNAMIC_MAC_SRC_PORT] = 20, +}; + +struct mib_names { char string[ETH_GSTRING_LEN]; -} mib_names[] = { +}; + +static const struct mib_names ksz87xx_mib_names[] = { { "rx_hi" }, { "rx_undersize" }, { "rx_fragments" }, @@ -62,6 +164,48 @@ static const struct { { "tx_discards" }, }; +static const struct mib_names ksz88xx_mib_names[] = { + { 
"rx" }, + { "rx_hi" }, + { "rx_undersize" }, + { "rx_fragments" }, + { "rx_oversize" }, + { "rx_jabbers" }, + { "rx_symbol_err" }, + { "rx_crc_err" }, + { "rx_align_err" }, + { "rx_mac_ctrl" }, + { "rx_pause" }, + { "rx_bcast" }, + { "rx_mcast" }, + { "rx_ucast" }, + { "rx_64_or_less" }, + { "rx_65_127" }, + { "rx_128_255" }, + { "rx_256_511" }, + { "rx_512_1023" }, + { "rx_1024_1522" }, + { "tx" }, + { "tx_hi" }, + { "tx_late_col" }, + { "tx_pause" }, + { "tx_bcast" }, + { "tx_mcast" }, + { "tx_ucast" }, + { "tx_deferred" }, + { "tx_total_col" }, + { "tx_exc_col" }, + { "tx_single_col" }, + { "tx_mult_col" }, + { "rx_discards" }, + { "tx_discards" }, +}; + +static bool ksz_is_ksz88x3(struct ksz_device *dev) +{ + return dev->chip_id == 0x8830; +} + static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); @@ -74,12 +218,20 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, bits, set ? bits : 0); } -static int ksz8795_reset_switch(struct ksz_device *dev) +static int ksz8_reset_switch(struct ksz_device *dev) { - /* reset switch */ - ksz_write8(dev, REG_POWER_MANAGEMENT_1, - SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S); - ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0); + if (ksz_is_ksz88x3(dev)) { + /* reset switch */ + ksz_cfg(dev, KSZ8863_REG_SW_RESET, + KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true); + ksz_cfg(dev, KSZ8863_REG_SW_RESET, + KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false); + } else { + /* reset switch */ + ksz_write8(dev, REG_POWER_MANAGEMENT_1, + SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S); + ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0); + } return 0; } @@ -117,29 +269,34 @@ static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue) true); } -static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, - u64 *cnt) +static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) { + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + const u8 *regs; u16 ctrl_addr; u32 data; u8 check; int loop; + masks = ksz8->masks; + regs = ksz8->regs; + ctrl_addr = addr + dev->reg_mib_cnt * port; ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); mutex_lock(&dev->alu_mutex); - ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); /* It is almost guaranteed to always read the valid bit because of * slow SPI speed. 
*/ for (loop = 2; loop > 0; loop--) { - ksz_read8(dev, REG_IND_MIB_CHECK, &check); + ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check); - if (check & MIB_COUNTER_VALID) { - ksz_read32(dev, REG_IND_DATA_LO, &data); - if (check & MIB_COUNTER_OVERFLOW) + if (check & masks[MIB_COUNTER_VALID]) { + ksz_read32(dev, regs[REG_IND_DATA_LO], &data); + if (check & masks[MIB_COUNTER_OVERFLOW]) *cnt += MIB_COUNTER_VALUE + 1; *cnt += data & MIB_COUNTER_VALUE; break; @@ -151,27 +308,33 @@ static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, u64 *dropped, u64 *cnt) { + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + const u8 *regs; u16 ctrl_addr; u32 data; u8 check; int loop; + masks = ksz8->masks; + regs = ksz8->regs; + addr -= dev->reg_mib_cnt; - ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port; - ctrl_addr += addr + KS_MIB_TOTAL_RX_0; + ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port; + ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0; ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); mutex_lock(&dev->alu_mutex); - ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); /* It is almost guaranteed to always read the valid bit because of * slow SPI speed. */ for (loop = 2; loop > 0; loop--) { - ksz_read8(dev, REG_IND_MIB_CHECK, &check); + ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check); - if (check & MIB_COUNTER_VALID) { - ksz_read32(dev, REG_IND_DATA_LO, &data); + if (check & masks[MIB_COUNTER_VALID]) { + ksz_read32(dev, regs[REG_IND_DATA_LO], &data); if (addr < 2) { u64 total; @@ -179,13 +342,13 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, total <<= 32; *cnt += total; *cnt += data; - if (check & MIB_COUNTER_OVERFLOW) { + if (check & masks[MIB_COUNTER_OVERFLOW]) { total = MIB_TOTAL_BYTES_H + 1; total <<= 32; *cnt += total; } } else { - if (check & MIB_COUNTER_OVERFLOW) + if (check & masks[MIB_COUNTER_OVERFLOW]) *cnt += MIB_PACKET_DROPPED + 1; *cnt += data & MIB_PACKET_DROPPED; } @@ -195,8 +358,52 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, mutex_unlock(&dev->alu_mutex); } -static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze) +static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; + u32 *last = (u32 *)dropped; + u16 ctrl_addr; + u32 data; + u32 cur; + + addr -= dev->reg_mib_cnt; + ctrl_addr = addr ? 
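Both MIB readers above fold a bounded hardware counter field plus an overflow flag into a 64-bit software counter. Roughly, as stand-alone C (the 30-bit field width mirrors MIB_COUNTER_VALUE; everything else is simplified):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define CNT_VALUE_MASK 0x3FFFFFFFu	/* 30-bit counter field, as in MIB_COUNTER_VALUE */

/* Fold one hardware snapshot plus its overflow flag into a 64-bit total. */
static void accumulate(uint64_t *total, uint32_t raw, bool overflow)
{
	if (overflow)
		*total += (uint64_t)CNT_VALUE_MASK + 1;
	*total += raw & CNT_VALUE_MASK;
}

int main(void)
{
	uint64_t rx = 0;

	accumulate(&rx, 0x12345678, false);
	accumulate(&rx, 0x00000010, true);	/* hardware field wrapped once */
	printf("rx=%llu\n", (unsigned long long)rx);
	return 0;
}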
KSZ8863_MIB_PACKET_DROPPED_TX_0 : + KSZ8863_MIB_PACKET_DROPPED_RX_0; + ctrl_addr += port; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + ksz_read32(dev, regs[REG_IND_DATA_LO], &data); + mutex_unlock(&dev->alu_mutex); + + data &= MIB_PACKET_DROPPED; + cur = last[addr]; + if (data != cur) { + last[addr] = data; + if (data < cur) + data += MIB_PACKET_DROPPED + 1; + data -= cur; + *cnt += data; + } +} + +static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) { + if (ksz_is_ksz88x3(dev)) + ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt); + else + ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt); +} + +static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze) +{ + if (ksz_is_ksz88x3(dev)) + return; + /* enable the port for flush/freeze function */ if (freeze) ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); @@ -207,14 +414,17 @@ static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze) ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); } -static void ksz8795_port_init_cnt(struct ksz_device *dev, int port) +static void ksz8_port_init_cnt(struct ksz_device *dev, int port) { struct ksz_port_mib *mib = &dev->ports[port].mib; + u64 *dropped; - /* flush all enabled port MIB counters */ - ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); - ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true); - ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); + if (!ksz_is_ksz88x3(dev)) { + /* flush all enabled port MIB counters */ + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true); + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); + } mib->cnt_ptr = 0; @@ -225,80 +435,99 @@ static void ksz8795_port_init_cnt(struct ksz_device *dev, int port) ++mib->cnt_ptr; } + /* last one in storage */ + dropped = &mib->counters[dev->mib_cnt]; + /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. 
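The KSZ8863 dropped-packet path above has no overflow bit to rely on, so it stores the previous raw reading and adds only the wrap-corrected difference. A minimal sketch of that bookkeeping, assuming the same 16-bit field width as MIB_PACKET_DROPPED:

#include <stdint.h>
#include <stdio.h>

#define DROPPED_MASK 0xFFFFu	/* 16-bit field, as in MIB_PACKET_DROPPED */

/* Add the change since the previous snapshot, compensating for one wrap. */
static void add_dropped_delta(uint64_t *total, uint32_t *last, uint32_t now)
{
	now &= DROPPED_MASK;
	if (now != *last) {
		uint32_t delta = now;

		if (now < *last)
			delta += DROPPED_MASK + 1;	/* counter wrapped around */
		delta -= *last;
		*last = now;
		*total += delta;
	}
}

int main(void)
{
	uint64_t dropped = 0;
	uint32_t last = 0;

	add_dropped_delta(&dropped, &last, 0xFFF0);
	add_dropped_delta(&dropped, &last, 0x0005);	/* wrapped: adds 21 */
	printf("dropped=%llu\n", (unsigned long long)dropped);
	return 0;
}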
*/ while (mib->cnt_ptr < dev->mib_cnt) { dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, - NULL, &mib->counters[mib->cnt_ptr]); + dropped, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; } mib->cnt_ptr = 0; memset(mib->counters, 0, dev->mib_cnt * sizeof(u64)); } -static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr, - u64 *data) +static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data) { + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; u16 ctrl_addr; ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; mutex_lock(&dev->alu_mutex); - ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); - ksz_read64(dev, REG_IND_DATA_HI, data); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + ksz_read64(dev, regs[REG_IND_DATA_HI], data); mutex_unlock(&dev->alu_mutex); } -static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr, - u64 data) +static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data) { + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; u16 ctrl_addr; ctrl_addr = IND_ACC_TABLE(table) | addr; mutex_lock(&dev->alu_mutex); - ksz_write64(dev, REG_IND_DATA_HI, data); - ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + ksz_write64(dev, regs[REG_IND_DATA_HI], data); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); mutex_unlock(&dev->alu_mutex); } -static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data) +static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data) { + struct ksz8 *ksz8 = dev->priv; int timeout = 100; + const u32 *masks; + const u8 *regs; + + masks = ksz8->masks; + regs = ksz8->regs; do { - ksz_read8(dev, REG_IND_DATA_CHECK, data); + ksz_read8(dev, regs[REG_IND_DATA_CHECK], data); timeout--; - } while ((*data & DYNAMIC_MAC_TABLE_NOT_READY) && timeout); + } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout); /* Entry is not ready for accessing. */ - if (*data & DYNAMIC_MAC_TABLE_NOT_READY) { + if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) { return -EAGAIN; /* Entry is ready for accessing. */ } else { - ksz_read8(dev, REG_IND_DATA_8, data); + ksz_read8(dev, regs[REG_IND_DATA_8], data); /* There is no valid entry in the table. */ - if (*data & DYNAMIC_MAC_TABLE_MAC_EMPTY) + if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) return -ENXIO; } return 0; } -static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr, - u8 *mac_addr, u8 *fid, u8 *src_port, - u8 *timestamp, u16 *entries) +static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, + u8 *mac_addr, u8 *fid, u8 *src_port, + u8 *timestamp, u16 *entries) { + struct ksz8 *ksz8 = dev->priv; u32 data_hi, data_lo; + const u8 *shifts; + const u32 *masks; + const u8 *regs; u16 ctrl_addr; u8 data; int rc; + shifts = ksz8->shifts; + masks = ksz8->masks; + regs = ksz8->regs; + ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr; mutex_lock(&dev->alu_mutex); - ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - rc = ksz8795_valid_dyn_entry(dev, &data); + rc = ksz8_valid_dyn_entry(dev, &data); if (rc == -EAGAIN) { if (addr == 0) *entries = 0; @@ -309,23 +538,23 @@ static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u64 buf = 0; int cnt; - ksz_read64(dev, REG_IND_DATA_HI, &buf); + ksz_read64(dev, regs[REG_IND_DATA_HI], &buf); data_hi = (u32)(buf >> 32); data_lo = (u32)buf; /* Check out how many valid entry in the table. 
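ksz8_valid_dyn_entry() above polls the check register until the not-ready bit clears, then distinguishes "retry later" (-EAGAIN) from "table empty" (-ENXIO). A self-contained model of that control flow, with the register read simulated and the two status bits collapsed into one register for brevity:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NOT_READY 0x80u
#define EMPTY     0x08u

/* Simulated check register: busy twice, then ready and non-empty. */
static uint8_t read_check(void)
{
	static int calls;

	return calls++ < 2 ? NOT_READY : 0x00;
}

static int wait_dyn_entry(uint8_t *status)
{
	int timeout = 100;

	do {
		*status = read_check();
		timeout--;
	} while ((*status & NOT_READY) && timeout);

	if (*status & NOT_READY)
		return -EAGAIN;		/* still busy, caller may retry */
	if (*status & EMPTY)
		return -ENXIO;		/* ready, but no entry in the table */
	return 0;
}

int main(void)
{
	uint8_t status;

	printf("result=%d status=0x%02x\n", wait_dyn_entry(&status), (unsigned)status);
	return 0;
}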
*/ - cnt = data & DYNAMIC_MAC_TABLE_ENTRIES_H; - cnt <<= DYNAMIC_MAC_ENTRIES_H_S; - cnt |= (data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >> - DYNAMIC_MAC_ENTRIES_S; + cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H]; + cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H]; + cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >> + shifts[DYNAMIC_MAC_ENTRIES]; *entries = cnt + 1; - *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >> - DYNAMIC_MAC_FID_S; - *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >> - DYNAMIC_MAC_SRC_PORT_S; - *timestamp = (data_hi & DYNAMIC_MAC_TABLE_TIMESTAMP) >> - DYNAMIC_MAC_TIMESTAMP_S; + *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >> + shifts[DYNAMIC_MAC_FID]; + *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >> + shifts[DYNAMIC_MAC_SRC_PORT]; + *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >> + shifts[DYNAMIC_MAC_TIMESTAMP]; mac_addr[5] = (u8)data_lo; mac_addr[4] = (u8)(data_lo >> 8); @@ -341,91 +570,128 @@ static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr, return rc; } -static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr, - struct alu_struct *alu) +static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) { + struct ksz8 *ksz8 = dev->priv; u32 data_hi, data_lo; + const u8 *shifts; + const u32 *masks; u64 data; - ksz8795_r_table(dev, TABLE_STATIC_MAC, addr, &data); + shifts = ksz8->shifts; + masks = ksz8->masks; + + ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data); data_hi = data >> 32; data_lo = (u32)data; - if (data_hi & (STATIC_MAC_TABLE_VALID | STATIC_MAC_TABLE_OVERRIDE)) { + if (data_hi & (masks[STATIC_MAC_TABLE_VALID] | + masks[STATIC_MAC_TABLE_OVERRIDE])) { alu->mac[5] = (u8)data_lo; alu->mac[4] = (u8)(data_lo >> 8); alu->mac[3] = (u8)(data_lo >> 16); alu->mac[2] = (u8)(data_lo >> 24); alu->mac[1] = (u8)data_hi; alu->mac[0] = (u8)(data_hi >> 8); - alu->port_forward = (data_hi & STATIC_MAC_TABLE_FWD_PORTS) >> - STATIC_MAC_FWD_PORTS_S; + alu->port_forward = + (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >> + shifts[STATIC_MAC_FWD_PORTS]; alu->is_override = - (data_hi & STATIC_MAC_TABLE_OVERRIDE) ? 1 : 0; + (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0; data_hi >>= 1; - alu->is_use_fid = (data_hi & STATIC_MAC_TABLE_USE_FID) ? 1 : 0; - alu->fid = (data_hi & STATIC_MAC_TABLE_FID) >> - STATIC_MAC_FID_S; + alu->is_static = true; + alu->is_use_fid = + (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 
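Both table readers above split one 64-bit indirect word into data_hi/data_lo and peel the station address out byte by byte, most significant address byte first. The same unpacking in isolation:

#include <stdint.h>
#include <stdio.h>

/* Unpack a 6-byte station address from the low 48 bits of a 64-bit table
 * word, mirroring the readers above.
 */
static void table_to_mac(uint64_t entry, uint8_t mac[6])
{
	uint32_t lo = (uint32_t)entry;
	uint32_t hi = (uint32_t)(entry >> 32);

	mac[5] = (uint8_t)lo;
	mac[4] = (uint8_t)(lo >> 8);
	mac[3] = (uint8_t)(lo >> 16);
	mac[2] = (uint8_t)(lo >> 24);
	mac[1] = (uint8_t)hi;
	mac[0] = (uint8_t)(hi >> 8);
}

int main(void)
{
	uint8_t mac[6];

	table_to_mac(0x0000112233445566ULL, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}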
1 : 0; + alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >> + shifts[STATIC_MAC_FID]; return 0; } return -ENXIO; } -static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr, - struct alu_struct *alu) +static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) { + struct ksz8 *ksz8 = dev->priv; u32 data_hi, data_lo; + const u8 *shifts; + const u32 *masks; u64 data; + shifts = ksz8->shifts; + masks = ksz8->masks; + data_lo = ((u32)alu->mac[2] << 24) | ((u32)alu->mac[3] << 16) | ((u32)alu->mac[4] << 8) | alu->mac[5]; data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1]; - data_hi |= (u32)alu->port_forward << STATIC_MAC_FWD_PORTS_S; + data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS]; if (alu->is_override) - data_hi |= STATIC_MAC_TABLE_OVERRIDE; + data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE]; if (alu->is_use_fid) { - data_hi |= STATIC_MAC_TABLE_USE_FID; - data_hi |= (u32)alu->fid << STATIC_MAC_FID_S; + data_hi |= masks[STATIC_MAC_TABLE_USE_FID]; + data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID]; } if (alu->is_static) - data_hi |= STATIC_MAC_TABLE_VALID; + data_hi |= masks[STATIC_MAC_TABLE_VALID]; else - data_hi &= ~STATIC_MAC_TABLE_OVERRIDE; + data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE]; data = (u64)data_hi << 32 | data_lo; - ksz8795_w_table(dev, TABLE_STATIC_MAC, addr, data); + ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data); } -static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid) +static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid, + u8 *member, u8 *valid) { - *fid = vlan & VLAN_TABLE_FID; - *member = (vlan & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S; - *valid = !!(vlan & VLAN_TABLE_VALID); + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + + shifts = ksz8->shifts; + masks = ksz8->masks; + + *fid = vlan & masks[VLAN_TABLE_FID]; + *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >> + shifts[VLAN_TABLE_MEMBERSHIP_S]; + *valid = !!(vlan & masks[VLAN_TABLE_VALID]); } -static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan) +static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid, + u16 *vlan) { + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + + shifts = ksz8->shifts; + masks = ksz8->masks; + *vlan = fid; - *vlan |= (u16)member << VLAN_TABLE_MEMBERSHIP_S; + *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S]; if (valid) - *vlan |= VLAN_TABLE_VALID; + *vlan |= masks[VLAN_TABLE_VALID]; } -static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr) +static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr) { + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; u64 data; int i; - ksz8795_r_table(dev, TABLE_VLAN, addr, &data); + shifts = ksz8->shifts; + + ksz8_r_table(dev, TABLE_VLAN, addr, &data); addr *= dev->phy_port_cnt; for (i = 0; i < dev->phy_port_cnt; i++) { dev->vlan_cache[addr + i].table[0] = (u16)data; - data >>= VLAN_TABLE_S; + data >>= shifts[VLAN_TABLE]; } } -static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) +static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) { int index; u16 *data; @@ -435,11 +701,11 @@ static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) data = (u16 *)&buf; addr = vid / dev->phy_port_cnt; index = vid & 3; - ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); + ksz8_r_table(dev, TABLE_VLAN, addr, &buf); *vlan = data[index]; } -static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 
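ksz8_r_vlan_table() and ksz8_w_vlan_table() above treat each 64-bit indirect word as four 16-bit VLAN entries and read-modify-write one slot. A stand-alone sketch of that slot arithmetic (four entries per word, as on the KSZ8795; the driver's in-memory u16 cast and endianness details are glossed over):

#include <stdint.h>
#include <stdio.h>

/* Replace the 16-bit VLAN entry for 'vid' inside its 64-bit table word; the
 * word itself would be read from and written back to table address vid / 4
 * through the indirect registers.
 */
static uint64_t put_vlan_entry(uint64_t word, uint16_t vid, uint16_t entry)
{
	unsigned int shift = (vid & 3) * 16;	/* four entries per word */

	word &= ~((uint64_t)0xFFFF << shift);
	word |= (uint64_t)entry << shift;
	return word;
}

int main(void)
{
	uint64_t word = put_vlan_entry(0, 5, 0x1234);	/* vid 5 -> slot 1 */

	printf("addr=%d word=0x%016llx\n", 5 / 4, (unsigned long long)word);
	return 0;
}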
vlan) +static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) { int index; u16 *data; @@ -449,30 +715,37 @@ static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) data = (u16 *)&buf; addr = vid / dev->phy_port_cnt; index = vid & 3; - ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); + ksz8_r_table(dev, TABLE_VLAN, addr, &buf); data[index] = vlan; dev->vlan_cache[vid].table[0] = vlan; - ksz8795_w_table(dev, TABLE_VLAN, addr, buf); + ksz8_w_table(dev, TABLE_VLAN, addr, buf); } -static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) +static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) { + struct ksz8 *ksz8 = dev->priv; u8 restart, speed, ctrl, link; + const u8 *regs = ksz8->regs; int processed = true; u16 data = 0; u8 p = phy; switch (reg) { case PHY_REG_CTRL: - ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart); - ksz_pread8(dev, p, P_SPEED_STATUS, &speed); - ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl); + ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart); + ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed); + ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl); if (restart & PORT_PHY_LOOPBACK) data |= PHY_LOOPBACK; if (ctrl & PORT_FORCE_100_MBIT) data |= PHY_SPEED_100MBIT; - if (!(ctrl & PORT_AUTO_NEG_DISABLE)) - data |= PHY_AUTO_NEG_ENABLE; + if (ksz_is_ksz88x3(dev)) { + if ((ctrl & PORT_AUTO_NEG_ENABLE)) + data |= PHY_AUTO_NEG_ENABLE; + } else { + if (!(ctrl & PORT_AUTO_NEG_DISABLE)) + data |= PHY_AUTO_NEG_ENABLE; + } if (restart & PORT_POWER_DOWN) data |= PHY_POWER_DOWN; if (restart & PORT_AUTO_NEG_RESTART) @@ -491,7 +764,7 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) data |= PHY_LED_DISABLE; break; case PHY_REG_STATUS: - ksz_pread8(dev, p, P_LINK_STATUS, &link); + ksz_pread8(dev, p, regs[P_LINK_STATUS], &link); data = PHY_100BTX_FD_CAPABLE | PHY_100BTX_CAPABLE | PHY_10BT_FD_CAPABLE | @@ -506,10 +779,13 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) data = KSZ8795_ID_HI; break; case PHY_REG_ID_2: - data = KSZ8795_ID_LO; + if (ksz_is_ksz88x3(dev)) + data = KSZ8863_ID_LO; + else + data = KSZ8795_ID_LO; break; case PHY_REG_AUTO_NEGOTIATION: - ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl); + ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl); data = PHY_AUTO_NEG_802_3; if (ctrl & PORT_AUTO_NEG_SYM_PAUSE) data |= PHY_AUTO_NEG_SYM_PAUSE; @@ -523,7 +799,7 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) data |= PHY_AUTO_NEG_10BT; break; case PHY_REG_REMOTE_CAPABILITY: - ksz_pread8(dev, p, P_REMOTE_STATUS, &link); + ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link); data = PHY_AUTO_NEG_802_3; if (link & PORT_REMOTE_SYM_PAUSE) data |= PHY_AUTO_NEG_SYM_PAUSE; @@ -546,10 +822,12 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) *val = data; } -static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) +static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) { - u8 p = phy; + struct ksz8 *ksz8 = dev->priv; u8 restart, speed, ctrl, data; + const u8 *regs = ksz8->regs; + u8 p = phy; switch (reg) { case PHY_REG_CTRL: @@ -557,24 +835,32 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) /* Do not support PHY reset function. 
*/ if (val & PHY_RESET) break; - ksz_pread8(dev, p, P_SPEED_STATUS, &speed); + ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed); data = speed; if (val & PHY_HP_MDIX) data |= PORT_HP_MDIX; else data &= ~PORT_HP_MDIX; if (data != speed) - ksz_pwrite8(dev, p, P_SPEED_STATUS, data); - ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl); + ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data); + ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl); data = ctrl; - if (!(val & PHY_AUTO_NEG_ENABLE)) - data |= PORT_AUTO_NEG_DISABLE; - else - data &= ~PORT_AUTO_NEG_DISABLE; + if (ksz_is_ksz88x3(dev)) { + if ((val & PHY_AUTO_NEG_ENABLE)) + data |= PORT_AUTO_NEG_ENABLE; + else + data &= ~PORT_AUTO_NEG_ENABLE; + } else { + if (!(val & PHY_AUTO_NEG_ENABLE)) + data |= PORT_AUTO_NEG_DISABLE; + else + data &= ~PORT_AUTO_NEG_DISABLE; + + /* Fiber port does not support auto-negotiation. */ + if (dev->ports[p].fiber) + data |= PORT_AUTO_NEG_DISABLE; + } - /* Fiber port does not support auto-negotiation. */ - if (dev->ports[p].fiber) - data |= PORT_AUTO_NEG_DISABLE; if (val & PHY_SPEED_100MBIT) data |= PORT_FORCE_100_MBIT; else @@ -584,8 +870,8 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) else data &= ~PORT_FORCE_FULL_DUPLEX; if (data != ctrl) - ksz_pwrite8(dev, p, P_FORCE_CTRL, data); - ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart); + ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data); + ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart); data = restart; if (val & PHY_LED_DISABLE) data |= PORT_LED_OFF; @@ -616,10 +902,10 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) else data &= ~PORT_PHY_LOOPBACK; if (data != restart) - ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data); + ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data); break; case PHY_REG_AUTO_NEGOTIATION: - ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl); + ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl); data = ctrl; data &= ~(PORT_AUTO_NEG_SYM_PAUSE | PORT_AUTO_NEG_100BTX_FD | @@ -637,34 +923,37 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) if (val & PHY_AUTO_NEG_10BT) data |= PORT_AUTO_NEG_10BT; if (data != ctrl) - ksz_pwrite8(dev, p, P_LOCAL_CTRL, data); + ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data); break; default: break; } } -static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds, - int port, - enum dsa_tag_protocol mp) +static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds, + int port, + enum dsa_tag_protocol mp) { - return DSA_TAG_PROTO_KSZ8795; + struct ksz_device *dev = ds->priv; + + /* ksz88x3 uses the same tag schema as KSZ9893 */ + return ksz_is_ksz88x3(dev) ? 
+ DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795; } -static void ksz8795_get_strings(struct dsa_switch *ds, int port, - u32 stringset, uint8_t *buf) +static void ksz8_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) { struct ksz_device *dev = ds->priv; int i; for (i = 0; i < dev->mib_cnt; i++) { - memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, - ETH_GSTRING_LEN); + memcpy(buf + i * ETH_GSTRING_LEN, + dev->mib_names[i].string, ETH_GSTRING_LEN); } } -static void ksz8795_cfg_port_member(struct ksz_device *dev, int port, - u8 member) +static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) { u8 data; @@ -675,8 +964,7 @@ static void ksz8795_cfg_port_member(struct ksz_device *dev, int port, dev->ports[port].member = member; } -static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, - u8 state) +static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { struct ksz_device *dev = ds->priv; int forward = dev->member; @@ -734,7 +1022,7 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, p->stp_state = state; /* Port membership may share register with STP state. */ if (member >= 0 && member != p->member) - ksz8795_cfg_port_member(dev, port, (u8)member); + ksz8_cfg_port_member(dev, port, (u8)member); /* Check if forwarding needs to be updated. */ if (state != BR_STATE_FORWARDING) { @@ -749,7 +1037,7 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, ksz_update_port_member(dev, port); } -static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port) +static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) { u8 learn[DSA_MAX_PORTS]; int first, index, cnt; @@ -782,30 +1070,35 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port) } } -static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port, - bool flag, - struct netlink_ext_ack *extack) +static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag, + struct netlink_ext_ack *extack) { struct ksz_device *dev = ds->priv; + if (ksz_is_ksz88x3(dev)) + return -ENOTSUPP; + ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag); return 0; } -static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) +static int ksz8_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct ksz_device *dev = ds->priv; u16 data, new_pvid = 0; u8 fid, member, valid; + if (ksz_is_ksz88x3(dev)) + return -ENOTSUPP; + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); - ksz8795_r_vlan_table(dev, vlan->vid, &data); - ksz8795_from_vlan(data, &fid, &member, &valid); + ksz8_r_vlan_table(dev, vlan->vid, &data); + ksz8_from_vlan(dev, data, &fid, &member, &valid); /* First time to setup the VLAN entry. 
*/ if (!valid) { @@ -815,8 +1108,8 @@ static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port, } member |= BIT(port); - ksz8795_to_vlan(fid, member, valid, &data); - ksz8795_w_vlan_table(dev, vlan->vid, data); + ksz8_to_vlan(dev, fid, member, valid, &data); + ksz8_w_vlan_table(dev, vlan->vid, data); /* change PVID */ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) @@ -834,21 +1127,24 @@ static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port, return 0; } -static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int ksz8_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct ksz_device *dev = ds->priv; u16 data, pvid, new_pvid = 0; u8 fid, member, valid; + if (ksz_is_ksz88x3(dev)) + return -ENOTSUPP; + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid); pvid = pvid & 0xFFF; ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); - ksz8795_r_vlan_table(dev, vlan->vid, &data); - ksz8795_from_vlan(data, &fid, &member, &valid); + ksz8_r_vlan_table(dev, vlan->vid, &data); + ksz8_from_vlan(dev, data, &fid, &member, &valid); member &= ~BIT(port); @@ -861,8 +1157,8 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port, if (pvid == vlan->vid) new_pvid = 1; - ksz8795_to_vlan(fid, member, valid, &data); - ksz8795_w_vlan_table(dev, vlan->vid, data); + ksz8_to_vlan(dev, fid, member, valid, &data); + ksz8_w_vlan_table(dev, vlan->vid, data); if (new_pvid != pvid) ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid); @@ -870,9 +1166,9 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror, - bool ingress) +static int ksz8_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) { struct ksz_device *dev = ds->priv; @@ -894,8 +1190,8 @@ static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port, return 0; } -static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror) +static void ksz8_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) { struct ksz_device *dev = ds->priv; u8 data; @@ -915,91 +1211,111 @@ static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } -static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) +static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) +{ + struct ksz_port *p = &dev->ports[port]; + u8 data8; + + if (!p->interface && dev->compat_interface) { + dev_warn(dev->dev, + "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. " + "Please update your device tree.\n", + port); + p->interface = dev->compat_interface; + } + + /* Configure MII interface for proper network communication. 
*/ + ksz_read8(dev, REG_PORT_5_CTRL_6, &data8); + data8 &= ~PORT_INTERFACE_TYPE; + data8 &= ~PORT_GMII_1GPS_MODE; + switch (p->interface) { + case PHY_INTERFACE_MODE_MII: + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_RMII: + data8 |= PORT_INTERFACE_RMII; + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_GMII: + data8 |= PORT_GMII_1GPS_MODE; + data8 |= PORT_INTERFACE_GMII; + p->phydev.speed = SPEED_1000; + break; + default: + data8 &= ~PORT_RGMII_ID_IN_ENABLE; + data8 &= ~PORT_RGMII_ID_OUT_ENABLE; + if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || + p->interface == PHY_INTERFACE_MODE_RGMII_RXID) + data8 |= PORT_RGMII_ID_IN_ENABLE; + if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || + p->interface == PHY_INTERFACE_MODE_RGMII_TXID) + data8 |= PORT_RGMII_ID_OUT_ENABLE; + data8 |= PORT_GMII_1GPS_MODE; + data8 |= PORT_INTERFACE_RGMII; + p->phydev.speed = SPEED_1000; + break; + } + ksz_write8(dev, REG_PORT_5_CTRL_6, data8); + p->phydev.duplex = 1; +} + +static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) { struct ksz_port *p = &dev->ports[port]; - u8 data8, member; + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + u8 member; + + masks = ksz8->masks; /* enable broadcast storm limit */ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); - ksz8795_set_prio_queue(dev, port, 4); + if (!ksz_is_ksz88x3(dev)) + ksz8795_set_prio_queue(dev, port, 4); /* disable DiffServ priority */ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false); /* replace priority */ - ksz_port_cfg(dev, port, P_802_1P_CTRL, PORT_802_1P_REMAPPING, false); + ksz_port_cfg(dev, port, P_802_1P_CTRL, + masks[PORT_802_1P_REMAPPING], false); /* enable 802.1p priority */ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true); if (cpu_port) { - if (!p->interface && dev->compat_interface) { - dev_warn(dev->dev, - "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. " - "Please update your device tree.\n", - port); - p->interface = dev->compat_interface; - } - - /* Configure MII interface for proper network communication. 
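The tail of ksz8795_cpu_interface_select() above maps the four RGMII variants onto independent RX and TX delay enables. The mapping written out as a small stand-alone table (generic names; the driver expresses it with the PORT_RGMII_ID_IN/OUT_ENABLE bits):

#include <stdbool.h>
#include <stdio.h>

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

/* rgmii: no internal delay; rgmii-id: both directions;
 * rgmii-rxid: receive only; rgmii-txid: transmit only.
 */
static void rgmii_delays(enum rgmii_mode m, bool *rx_delay, bool *tx_delay)
{
	*rx_delay = (m == RGMII_ID || m == RGMII_RXID);
	*tx_delay = (m == RGMII_ID || m == RGMII_TXID);
}

int main(void)
{
	static const char * const names[] = {
		"rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid"
	};

	for (int m = RGMII; m <= RGMII_TXID; m++) {
		bool rx, tx;

		rgmii_delays((enum rgmii_mode)m, &rx, &tx);
		printf("%-10s rx_delay=%d tx_delay=%d\n", names[m], rx, tx);
	}
	return 0;
}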
*/ - ksz_read8(dev, REG_PORT_5_CTRL_6, &data8); - data8 &= ~PORT_INTERFACE_TYPE; - data8 &= ~PORT_GMII_1GPS_MODE; - switch (p->interface) { - case PHY_INTERFACE_MODE_MII: - p->phydev.speed = SPEED_100; - break; - case PHY_INTERFACE_MODE_RMII: - data8 |= PORT_INTERFACE_RMII; - p->phydev.speed = SPEED_100; - break; - case PHY_INTERFACE_MODE_GMII: - data8 |= PORT_GMII_1GPS_MODE; - data8 |= PORT_INTERFACE_GMII; - p->phydev.speed = SPEED_1000; - break; - default: - data8 &= ~PORT_RGMII_ID_IN_ENABLE; - data8 &= ~PORT_RGMII_ID_OUT_ENABLE; - if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || - p->interface == PHY_INTERFACE_MODE_RGMII_RXID) - data8 |= PORT_RGMII_ID_IN_ENABLE; - if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || - p->interface == PHY_INTERFACE_MODE_RGMII_TXID) - data8 |= PORT_RGMII_ID_OUT_ENABLE; - data8 |= PORT_GMII_1GPS_MODE; - data8 |= PORT_INTERFACE_RGMII; - p->phydev.speed = SPEED_1000; - break; - } - ksz_write8(dev, REG_PORT_5_CTRL_6, data8); - p->phydev.duplex = 1; + if (!ksz_is_ksz88x3(dev)) + ksz8795_cpu_interface_select(dev, port); member = dev->port_mask; } else { member = dev->host_mask | p->vid_member; } - ksz8795_cfg_port_member(dev, port, member); + ksz8_cfg_port_member(dev, port, member); } -static void ksz8795_config_cpu_port(struct dsa_switch *ds) +static void ksz8_config_cpu_port(struct dsa_switch *ds) { struct ksz_device *dev = ds->priv; + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; struct ksz_port *p; + const u32 *masks; u8 remote; int i; + masks = ksz8->masks; + /* Switch marks the maximum frame with extra byte as oversize. */ ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true); - ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true); + ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); p = &dev->ports[dev->cpu_port]; p->vid_member = dev->port_mask; p->on = 1; - ksz8795_port_setup(dev, dev->cpu_port, true); + ksz8_port_setup(dev, dev->cpu_port, true); dev->member = dev->host_mask; for (i = 0; i < dev->phy_port_cnt; i++) { @@ -1010,7 +1326,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds) */ p->vid_member = BIT(i); p->member = dev->port_mask; - ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED); + ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED); /* Last port may be disabled. 
*/ if (i == dev->phy_port_cnt) @@ -1022,9 +1338,11 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds) p = &dev->ports[i]; if (!p->on) continue; - ksz_pread8(dev, i, P_REMOTE_STATUS, &remote); - if (remote & PORT_FIBER_MODE) - p->fiber = 1; + if (!ksz_is_ksz88x3(dev)) { + ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote); + if (remote & PORT_FIBER_MODE) + p->fiber = 1; + } if (p->fiber) ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL, true); @@ -1034,7 +1352,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds) } } -static int ksz8795_setup(struct dsa_switch *ds) +static int ksz8_setup(struct dsa_switch *ds) { struct ksz_device *dev = ds->priv; struct alu_struct alu; @@ -1045,7 +1363,7 @@ static int ksz8795_setup(struct dsa_switch *ds) if (!dev->vlan_cache) return -ENOMEM; - ret = ksz8795_reset_switch(dev); + ret = ksz8_reset_switch(dev); if (ret) { dev_err(ds->dev, "failed to reset switch\n"); return ret; @@ -1068,7 +1386,7 @@ static int ksz8795_setup(struct dsa_switch *ds) UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); - ksz8795_config_cpu_port(ds); + ksz8_config_cpu_port(ds); ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true); @@ -1083,7 +1401,7 @@ static int ksz8795_setup(struct dsa_switch *ds) BROADCAST_STORM_PROT_RATE) / 100); for (i = 0; i < (dev->num_vlans / 4); i++) - ksz8795_r_vlan_entries(dev, i); + ksz8_r_vlan_entries(dev, i); /* Setup STP address for STP operation. */ memset(&alu, 0, sizeof(alu)); @@ -1092,7 +1410,7 @@ static int ksz8795_setup(struct dsa_switch *ds) alu.is_override = true; alu.port_forward = dev->host_mask; - ksz8795_w_sta_mac_table(dev, 0, &alu); + ksz8_w_sta_mac_table(dev, 0, &alu); ksz_init_mib_timer(dev); @@ -1101,36 +1419,36 @@ static int ksz8795_setup(struct dsa_switch *ds) return 0; } -static const struct dsa_switch_ops ksz8795_switch_ops = { - .get_tag_protocol = ksz8795_get_tag_protocol, - .setup = ksz8795_setup, +static const struct dsa_switch_ops ksz8_switch_ops = { + .get_tag_protocol = ksz8_get_tag_protocol, + .setup = ksz8_setup, .phy_read = ksz_phy_read16, .phy_write = ksz_phy_write16, .phylink_mac_link_down = ksz_mac_link_down, .port_enable = ksz_enable_port, - .get_strings = ksz8795_get_strings, + .get_strings = ksz8_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, .port_bridge_join = ksz_port_bridge_join, .port_bridge_leave = ksz_port_bridge_leave, - .port_stp_state_set = ksz8795_port_stp_state_set, + .port_stp_state_set = ksz8_port_stp_state_set, .port_fast_age = ksz_port_fast_age, - .port_vlan_filtering = ksz8795_port_vlan_filtering, - .port_vlan_add = ksz8795_port_vlan_add, - .port_vlan_del = ksz8795_port_vlan_del, + .port_vlan_filtering = ksz8_port_vlan_filtering, + .port_vlan_add = ksz8_port_vlan_add, + .port_vlan_del = ksz8_port_vlan_del, .port_fdb_dump = ksz_port_fdb_dump, .port_mdb_add = ksz_port_mdb_add, .port_mdb_del = ksz_port_mdb_del, - .port_mirror_add = ksz8795_port_mirror_add, - .port_mirror_del = ksz8795_port_mirror_del, + .port_mirror_add = ksz8_port_mirror_add, + .port_mirror_del = ksz8_port_mirror_del, }; -static u32 ksz8795_get_port_addr(int port, int offset) +static u32 ksz8_get_port_addr(int port, int offset) { return PORT_CTRL_ADDR(port, offset); } -static int ksz8795_switch_detect(struct ksz_device *dev) +static int ksz8_switch_detect(struct ksz_device *dev) { u8 id1, id2; u16 id16; @@ -1143,19 +1461,30 @@ static int ksz8795_switch_detect(struct ksz_device *dev) id1 = id16 >> 8; id2 = id16 & 
SW_CHIP_ID_M; - if (id1 != FAMILY_ID || - (id2 != CHIP_ID_94 && id2 != CHIP_ID_95)) - return -ENODEV; - if (id2 == CHIP_ID_95) { - u8 val; + switch (id1) { + case KSZ87_FAMILY_ID: + if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95)) + return -ENODEV; + + if (id2 == CHIP_ID_95) { + u8 val; - id2 = 0x95; - ksz_read8(dev, REG_PORT_1_STATUS_0, &val); - if (val & PORT_FIBER_MODE) - id2 = 0x65; - } else if (id2 == CHIP_ID_94) { - id2 = 0x94; + id2 = 0x95; + ksz_read8(dev, REG_PORT_STATUS_0, &val); + if (val & PORT_FIBER_MODE) + id2 = 0x65; + } else if (id2 == CHIP_ID_94) { + id2 = 0x94; + } + break; + case KSZ88_FAMILY_ID: + if (id2 != CHIP_ID_63) + return -ENODEV; + break; + default: + dev_err(dev->dev, "invalid family id: %d\n", id1); + return -ENODEV; } id16 &= ~0xff; id16 |= id2; @@ -1174,7 +1503,7 @@ struct ksz_chip_data { int port_cnt; }; -static const struct ksz_chip_data ksz8795_switch_chips[] = { +static const struct ksz_chip_data ksz8_switch_chips[] = { { .chip_id = 0x8795, .dev_name = "KSZ8795", @@ -1216,16 +1545,26 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ }, + { + .chip_id = 0x8830, + .dev_name = "KSZ8863/KSZ8873", + .num_vlans = 16, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x4, /* can be configured as cpu port */ + .port_cnt = 3, + }, }; -static int ksz8795_switch_init(struct ksz_device *dev) +static int ksz8_switch_init(struct ksz_device *dev) { + struct ksz8 *ksz8 = dev->priv; int i; - dev->ds->ops = &ksz8795_switch_ops; + dev->ds->ops = &ksz8_switch_ops; - for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) { - const struct ksz_chip_data *chip = &ksz8795_switch_chips[i]; + for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz8_switch_chips[i]; if (dev->chip_id == chip->chip_id) { dev->name = chip->dev_name; @@ -1247,8 +1586,21 @@ static int ksz8795_switch_init(struct ksz_device *dev) if (!dev->cpu_ports) return -ENODEV; - dev->reg_mib_cnt = KSZ8795_COUNTER_NUM; - dev->mib_cnt = ARRAY_SIZE(mib_names); + if (ksz_is_ksz88x3(dev)) { + ksz8->regs = ksz8863_regs; + ksz8->masks = ksz8863_masks; + ksz8->shifts = ksz8863_shifts; + dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names); + dev->mib_names = ksz88xx_mib_names; + } else { + ksz8->regs = ksz8795_regs; + ksz8->masks = ksz8795_masks; + ksz8->shifts = ksz8795_shifts; + dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names); + dev->mib_names = ksz87xx_mib_names; + } + + dev->reg_mib_cnt = MIB_COUNTER_NUM; dev->ports = devm_kzalloc(dev->dev, dev->port_cnt * sizeof(struct ksz_port), @@ -1272,36 +1624,36 @@ static int ksz8795_switch_init(struct ksz_device *dev) return 0; } -static void ksz8795_switch_exit(struct ksz_device *dev) +static void ksz8_switch_exit(struct ksz_device *dev) { - ksz8795_reset_switch(dev); + ksz8_reset_switch(dev); } -static const struct ksz_dev_ops ksz8795_dev_ops = { - .get_port_addr = ksz8795_get_port_addr, - .cfg_port_member = ksz8795_cfg_port_member, - .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table, - .port_setup = ksz8795_port_setup, - .r_phy = ksz8795_r_phy, - .w_phy = ksz8795_w_phy, - .r_dyn_mac_table = ksz8795_r_dyn_mac_table, - .r_sta_mac_table = ksz8795_r_sta_mac_table, - .w_sta_mac_table = ksz8795_w_sta_mac_table, - .r_mib_cnt = ksz8795_r_mib_cnt, - .r_mib_pkt = ksz8795_r_mib_pkt, - .freeze_mib = ksz8795_freeze_mib, - .port_init_cnt = ksz8795_port_init_cnt, - .shutdown = ksz8795_reset_switch, - .detect = ksz8795_switch_detect, - .init = 
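ksz8_switch_detect() above reads a 16-bit ID, splits it into family and chip fields and accepts only known combinations. A compact sketch of that decision with placeholder constants (the real mask is SW_CHIP_ID_M and the IDs come from REG_CHIP_ID0):

#include <stdint.h>
#include <stdio.h>

#define FAMILY_87    0x87
#define FAMILY_88    0x88
#define CHIP_63      0x30
#define CHIP_ID_MASK 0xF0	/* placeholder for SW_CHIP_ID_M */

static int detect(uint16_t id16)
{
	uint8_t family = id16 >> 8;
	uint8_t chip = id16 & CHIP_ID_MASK;

	switch (family) {
	case FAMILY_87:
		printf("KSZ87xx family, chip id 0x%02x\n", chip);
		return 0;
	case FAMILY_88:
		if (chip != CHIP_63) {
			printf("unsupported KSZ88xx chip 0x%02x\n", chip);
			return -1;
		}
		printf("KSZ8863/KSZ8873\n");
		return 0;
	default:
		printf("invalid family id 0x%02x\n", family);
		return -1;
	}
}

int main(void)
{
	detect(0x8790);
	return detect(0x8830);
}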
ksz8795_switch_init, - .exit = ksz8795_switch_exit, +static const struct ksz_dev_ops ksz8_dev_ops = { + .get_port_addr = ksz8_get_port_addr, + .cfg_port_member = ksz8_cfg_port_member, + .flush_dyn_mac_table = ksz8_flush_dyn_mac_table, + .port_setup = ksz8_port_setup, + .r_phy = ksz8_r_phy, + .w_phy = ksz8_w_phy, + .r_dyn_mac_table = ksz8_r_dyn_mac_table, + .r_sta_mac_table = ksz8_r_sta_mac_table, + .w_sta_mac_table = ksz8_w_sta_mac_table, + .r_mib_cnt = ksz8_r_mib_cnt, + .r_mib_pkt = ksz8_r_mib_pkt, + .freeze_mib = ksz8_freeze_mib, + .port_init_cnt = ksz8_port_init_cnt, + .shutdown = ksz8_reset_switch, + .detect = ksz8_switch_detect, + .init = ksz8_switch_init, + .exit = ksz8_switch_exit, }; -int ksz8795_switch_register(struct ksz_device *dev) +int ksz8_switch_register(struct ksz_device *dev) { - return ksz_switch_register(dev, &ksz8795_dev_ops); + return ksz_switch_register(dev, &ksz8_dev_ops); } -EXPORT_SYMBOL(ksz8795_switch_register); +EXPORT_SYMBOL(ksz8_switch_register); MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>"); MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver"); diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index 40372047d40d..c2e52c40a54c 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -16,7 +16,8 @@ #define REG_CHIP_ID0 0x00 -#define FAMILY_ID 0x87 +#define KSZ87_FAMILY_ID 0x87 +#define KSZ88_FAMILY_ID 0x88 #define REG_CHIP_ID1 0x01 @@ -28,6 +29,12 @@ #define CHIP_ID_94 0x60 #define CHIP_ID_95 0x90 +#define CHIP_ID_63 0x30 + +#define KSZ8863_REG_SW_RESET 0x43 + +#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4) +#define KSZ8863_PCS_RESET BIT(0) #define REG_SW_CTRL_0 0x02 @@ -98,7 +105,6 @@ #define REG_SW_CTRL_10 0x0C -#define SW_TAIL_TAG_ENABLE BIT(1) #define SW_PASS_PAUSE BIT(0) #define REG_SW_CTRL_11 0x0D @@ -150,7 +156,6 @@ #define REG_PORT_4_CTRL_2 0x42 #define REG_PORT_5_CTRL_2 0x52 -#define PORT_802_1P_REMAPPING BIT(7) #define PORT_INGRESS_FILTER BIT(6) #define PORT_DISCARD_NON_VID BIT(5) #define PORT_FORCE_FLOW_CTRL BIT(4) @@ -269,6 +274,7 @@ #define REG_PORT_3_CTRL_9 0x3C #define REG_PORT_4_CTRL_9 0x4C +#define PORT_AUTO_NEG_ENABLE BIT(7) #define PORT_AUTO_NEG_DISABLE BIT(7) #define PORT_FORCE_100_MBIT BIT(6) #define PORT_FORCE_FULL_DUPLEX BIT(5) @@ -319,14 +325,12 @@ #define REG_PORT_CTRL_5 0x05 -#define REG_PORT_CTRL_7 0x07 #define REG_PORT_STATUS_0 0x08 #define REG_PORT_STATUS_1 0x09 #define REG_PORT_LINK_MD_CTRL 0x0A #define REG_PORT_LINK_MD_RESULT 0x0B #define REG_PORT_CTRL_9 0x0C #define REG_PORT_CTRL_10 0x0D -#define REG_PORT_STATUS_2 0x0E #define REG_PORT_STATUS_3 0x0F #define REG_PORT_CTRL_12 0xA0 @@ -356,8 +360,6 @@ #define REG_SW_MAC_ADDR_4 0x6C #define REG_SW_MAC_ADDR_5 0x6D -#define REG_IND_CTRL_0 0x6E - #define TABLE_EXT_SELECT_S 5 #define TABLE_EEE_V 1 #define TABLE_ACL_V 2 @@ -383,23 +385,13 @@ #define TABLE_ENTRY_MASK 0x03FF #define TABLE_EXT_ENTRY_MASK 0x0FFF -#define REG_IND_DATA_8 0x70 -#define REG_IND_DATA_7 0x71 -#define REG_IND_DATA_6 0x72 #define REG_IND_DATA_5 0x73 -#define REG_IND_DATA_4 0x74 -#define REG_IND_DATA_3 0x75 #define REG_IND_DATA_2 0x76 #define REG_IND_DATA_1 0x77 #define REG_IND_DATA_0 0x78 #define REG_IND_DATA_PME_EEE_ACL 0xA0 -#define REG_IND_DATA_CHECK REG_IND_DATA_6 -#define REG_IND_MIB_CHECK REG_IND_DATA_4 -#define REG_IND_DATA_HI REG_IND_DATA_7 -#define REG_IND_DATA_LO REG_IND_DATA_3 - #define REG_INT_STATUS 0x7C #define REG_INT_ENABLE 0x7D @@ -816,6 +808,7 @@ #define KSZ8795_ID_HI 0x0022 #define 
KSZ8795_ID_LO 0x1550 +#define KSZ8863_ID_LO 0x1430 #define KSZ8795_SW_ID 0x8795 @@ -846,7 +839,7 @@ #define KS_PRIO_IN_REG 4 -#define KSZ8795_COUNTER_NUM 0x20 +#define MIB_COUNTER_NUM 0x20 /* Common names used by other drivers */ @@ -856,12 +849,6 @@ #define P_MIRROR_CTRL REG_PORT_CTRL_1 #define P_802_1P_CTRL REG_PORT_CTRL_2 #define P_STP_CTRL REG_PORT_CTRL_2 -#define P_LOCAL_CTRL REG_PORT_CTRL_7 -#define P_REMOTE_STATUS REG_PORT_STATUS_0 -#define P_FORCE_CTRL REG_PORT_CTRL_9 -#define P_NEG_RESTART_CTRL REG_PORT_CTRL_10 -#define P_SPEED_STATUS REG_PORT_STATUS_1 -#define P_LINK_STATUS REG_PORT_STATUS_2 #define P_PASS_ALL_CTRL REG_PORT_CTRL_12 #define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12 #define P_DROP_TAG_CTRL REG_PORT_CTRL_13 @@ -876,7 +863,6 @@ #define S_MIRROR_CTRL REG_SW_CTRL_3 #define S_REPLACE_VID_CTRL REG_SW_CTRL_4 #define S_PASS_PAUSE_CTRL REG_SW_CTRL_10 -#define S_TAIL_TAG_CTRL REG_SW_CTRL_10 #define S_802_1P_PRIO_CTRL REG_SW_CTRL_12 #define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0 #define S_IPV6_MLD_CTRL REG_SW_CTRL_21 @@ -890,65 +876,6 @@ #define BROADCAST_STORM_VALUE 9969 /** - * STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF - * STATIC_MAC_TABLE_FWD_PORTS 00-001F0000-00000000 - * STATIC_MAC_TABLE_VALID 00-00200000-00000000 - * STATIC_MAC_TABLE_OVERRIDE 00-00400000-00000000 - * STATIC_MAC_TABLE_USE_FID 00-00800000-00000000 - * STATIC_MAC_TABLE_FID 00-7F000000-00000000 - */ - -#define STATIC_MAC_TABLE_ADDR 0x0000FFFF -#define STATIC_MAC_TABLE_FWD_PORTS 0x001F0000 -#define STATIC_MAC_TABLE_VALID 0x00200000 -#define STATIC_MAC_TABLE_OVERRIDE 0x00400000 -#define STATIC_MAC_TABLE_USE_FID 0x00800000 -#define STATIC_MAC_TABLE_FID 0x7F000000 - -#define STATIC_MAC_FWD_PORTS_S 16 -#define STATIC_MAC_FID_S 24 - -/** - * VLAN_TABLE_FID 00-007F007F-007F007F - * VLAN_TABLE_MEMBERSHIP 00-0F800F80-0F800F80 - * VLAN_TABLE_VALID 00-10001000-10001000 - */ - -#define VLAN_TABLE_FID 0x007F -#define VLAN_TABLE_MEMBERSHIP 0x0F80 -#define VLAN_TABLE_VALID 0x1000 - -#define VLAN_TABLE_MEMBERSHIP_S 7 -#define VLAN_TABLE_S 16 - -/** - * DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF - * DYNAMIC_MAC_TABLE_FID 00-007F0000-00000000 - * DYNAMIC_MAC_TABLE_NOT_READY 00-00800000-00000000 - * DYNAMIC_MAC_TABLE_SRC_PORT 00-07000000-00000000 - * DYNAMIC_MAC_TABLE_TIMESTAMP 00-18000000-00000000 - * DYNAMIC_MAC_TABLE_ENTRIES 7F-E0000000-00000000 - * DYNAMIC_MAC_TABLE_MAC_EMPTY 80-00000000-00000000 - */ - -#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF -#define DYNAMIC_MAC_TABLE_FID 0x007F0000 -#define DYNAMIC_MAC_TABLE_SRC_PORT 0x07000000 -#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x18000000 -#define DYNAMIC_MAC_TABLE_ENTRIES 0xE0000000 - -#define DYNAMIC_MAC_TABLE_NOT_READY 0x80 - -#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x7F -#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x80 - -#define DYNAMIC_MAC_FID_S 16 -#define DYNAMIC_MAC_SRC_PORT_S 24 -#define DYNAMIC_MAC_TIMESTAMP_S 27 -#define DYNAMIC_MAC_ENTRIES_S 29 -#define DYNAMIC_MAC_ENTRIES_H_S 3 - -/** * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF * MIB_PACKET_DROPPED 00-00000000-0000FFFF @@ -956,31 +883,15 @@ * MIB_COUNTER_OVERFLOW 00-00000040-00000000 */ -#define MIB_COUNTER_OVERFLOW BIT(6) -#define MIB_COUNTER_VALID BIT(5) - #define MIB_COUNTER_VALUE 0x3FFFFFFF -#define KS_MIB_TOTAL_RX_0 0x100 -#define KS_MIB_TOTAL_TX_0 0x101 -#define KS_MIB_PACKET_DROPPED_RX_0 0x102 -#define KS_MIB_PACKET_DROPPED_TX_0 0x103 -#define KS_MIB_TOTAL_RX_1 0x104 -#define KS_MIB_TOTAL_TX_1 0x105 -#define KS_MIB_PACKET_DROPPED_TX_1 0x106 -#define KS_MIB_PACKET_DROPPED_RX_1 0x107 -#define 
KS_MIB_TOTAL_RX_2 0x108 -#define KS_MIB_TOTAL_TX_2 0x109 -#define KS_MIB_PACKET_DROPPED_TX_2 0x10A -#define KS_MIB_PACKET_DROPPED_RX_2 0x10B -#define KS_MIB_TOTAL_RX_3 0x10C -#define KS_MIB_TOTAL_TX_3 0x10D -#define KS_MIB_PACKET_DROPPED_TX_3 0x10E -#define KS_MIB_PACKET_DROPPED_RX_3 0x10F -#define KS_MIB_TOTAL_RX_4 0x110 -#define KS_MIB_TOTAL_TX_4 0x111 -#define KS_MIB_PACKET_DROPPED_TX_4 0x112 -#define KS_MIB_PACKET_DROPPED_RX_4 0x113 +#define KSZ8795_MIB_TOTAL_RX_0 0x100 +#define KSZ8795_MIB_TOTAL_TX_0 0x101 +#define KSZ8795_MIB_TOTAL_RX_1 0x104 +#define KSZ8795_MIB_TOTAL_TX_1 0x105 + +#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100 +#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105 #define MIB_PACKET_DROPPED 0x0000FFFF diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index f98432a3e2b5..85ba12aa82d8 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -14,34 +14,52 @@ #include <linux/regmap.h> #include <linux/spi/spi.h> +#include "ksz8.h" #include "ksz_common.h" -#define SPI_ADDR_SHIFT 12 -#define SPI_ADDR_ALIGN 3 -#define SPI_TURNAROUND_SHIFT 1 +#define KSZ8795_SPI_ADDR_SHIFT 12 +#define KSZ8795_SPI_ADDR_ALIGN 3 +#define KSZ8795_SPI_TURNAROUND_SHIFT 1 -KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT, - SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN); +#define KSZ8863_SPI_ADDR_SHIFT 8 +#define KSZ8863_SPI_ADDR_ALIGN 8 +#define KSZ8863_SPI_TURNAROUND_SHIFT 0 + +KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT, + KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN); + +KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT, + KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN); static int ksz8795_spi_probe(struct spi_device *spi) { + const struct regmap_config *regmap_config; + struct device *ddev = &spi->dev; struct regmap_config rc; struct ksz_device *dev; - int i, ret; + struct ksz8 *ksz8; + int i, ret = 0; - dev = ksz_switch_alloc(&spi->dev, spi); + ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL); + ksz8->priv = spi; + + dev = ksz_switch_alloc(&spi->dev, ksz8); if (!dev) return -ENOMEM; + regmap_config = device_get_match_data(ddev); + if (!regmap_config) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) { - rc = ksz8795_regmap_config[i]; + rc = regmap_config[i]; rc.lock_arg = &dev->regmap_mutex; dev->regmap[i] = devm_regmap_init_spi(spi, &rc); if (IS_ERR(dev->regmap[i])) { ret = PTR_ERR(dev->regmap[i]); dev_err(&spi->dev, "Failed to initialize regmap%i: %d\n", - ksz8795_regmap_config[i].val_bits, ret); + regmap_config[i].val_bits, ret); return ret; } } @@ -55,7 +73,7 @@ static int ksz8795_spi_probe(struct spi_device *spi) if (ret) return ret; - ret = ksz8795_switch_register(dev); + ret = ksz8_switch_register(dev); /* Main DSA driver may not be started yet. 
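The reworked SPI probe above no longer hard-codes one regmap layout; it looks the per-chip table up from the OF match entry via device_get_match_data(). A userspace analogue of that compatible-string to config-data lookup (names and fields are invented for the sketch):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Invented per-chip configuration; the driver's equivalent is an array of
 * struct regmap_config reached through the of_device_id .data pointer.
 */
struct chip_cfg {
	const char *label;
	int addr_shift;
};

static const struct chip_cfg cfg_8795 = { "ksz8795", 12 };
static const struct chip_cfg cfg_8863 = { "ksz8863", 8 };

static const struct {
	const char *compatible;
	const struct chip_cfg *data;
} match_table[] = {
	{ "microchip,ksz8795", &cfg_8795 },
	{ "microchip,ksz8863", &cfg_8863 },
};

static const struct chip_cfg *get_match_data(const char *compatible)
{
	for (size_t i = 0; i < sizeof(match_table) / sizeof(match_table[0]); i++)
		if (!strcmp(match_table[i].compatible, compatible))
			return match_table[i].data;
	return NULL;	/* the probe above fails with -EINVAL in this case */
}

int main(void)
{
	const struct chip_cfg *cfg = get_match_data("microchip,ksz8863");

	if (cfg)
		printf("%s: addr_shift=%d\n", cfg->label, cfg->addr_shift);
	return 0;
}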
*/ if (ret) @@ -85,9 +103,11 @@ static void ksz8795_spi_shutdown(struct spi_device *spi) } static const struct of_device_id ksz8795_dt_ids[] = { - { .compatible = "microchip,ksz8765" }, - { .compatible = "microchip,ksz8794" }, - { .compatible = "microchip,ksz8795" }, + { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config }, + { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config }, + { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config }, + { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config }, + { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config }, {}, }; MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c new file mode 100644 index 000000000000..30d97ea7a949 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8863_smi.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ8863 series register access through SMI + * + * Copyright (C) 2019 Pengutronix, Michael Grzeschik <kernel@pengutronix.de> + */ + +#include "ksz8.h" +#include "ksz_common.h" + +/* Serial Management Interface (SMI) uses the following frame format: + * + * preamble|start|Read/Write| PHY | REG |TA| Data bits | Idle + * |frame| OP code |address |address| | | + * read | 32x1´s | 01 | 00 | 1xRRR | RRRRR |Z0| 00000000DDDDDDDD | Z + * write| 32x1´s | 01 | 00 | 0xRRR | RRRRR |10| xxxxxxxxDDDDDDDD | Z + * + */ + +#define SMI_KSZ88XX_READ_PHY BIT(4) + +static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len, + void *val_buf, size_t val_len) +{ + struct ksz_device *dev = ctx; + struct mdio_device *mdev; + u8 reg = *(u8 *)reg_buf; + u8 *val = val_buf; + struct ksz8 *ksz8; + int i, ret = 0; + + ksz8 = dev->priv; + mdev = ksz8->priv; + + mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED); + for (i = 0; i < val_len; i++) { + int tmp = reg + i; + + ret = __mdiobus_read(mdev->bus, ((tmp & 0xE0) >> 5) | + SMI_KSZ88XX_READ_PHY, tmp); + if (ret < 0) + goto out; + + val[i] = ret; + } + ret = 0; + + out: + mutex_unlock(&mdev->bus->mdio_lock); + + return ret; +} + +static int ksz8863_mdio_write(void *ctx, const void *data, size_t count) +{ + struct ksz_device *dev = ctx; + struct mdio_device *mdev; + struct ksz8 *ksz8; + int i, ret = 0; + u32 reg; + u8 *val; + + ksz8 = dev->priv; + mdev = ksz8->priv; + + val = (u8 *)(data + 4); + reg = *(u32 *)data; + + mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED); + for (i = 0; i < (count - 4); i++) { + int tmp = reg + i; + + ret = __mdiobus_write(mdev->bus, ((tmp & 0xE0) >> 5), + tmp, val[i]); + if (ret < 0) + goto out; + } + + out: + mutex_unlock(&mdev->bus->mdio_lock); + + return ret; +} + +static const struct regmap_bus regmap_smi[] = { + { + .read = ksz8863_mdio_read, + .write = ksz8863_mdio_write, + .max_raw_read = 1, + .max_raw_write = 1, + }, + { + .read = ksz8863_mdio_read, + .write = ksz8863_mdio_write, + .val_format_endian_default = REGMAP_ENDIAN_BIG, + .max_raw_read = 2, + .max_raw_write = 2, + }, + { + .read = ksz8863_mdio_read, + .write = ksz8863_mdio_write, + .val_format_endian_default = REGMAP_ENDIAN_BIG, + .max_raw_read = 4, + .max_raw_write = 4, + } +}; + +static const struct regmap_config ksz8863_regmap_config[] = { + { + .name = "#8", + .reg_bits = 8, + .pad_bits = 24, + .val_bits = 8, + .cache_type = REGCACHE_NONE, + .use_single_read = 1, + .lock = ksz_regmap_lock, + .unlock = ksz_regmap_unlock, + }, + { + .name = "#16", + .reg_bits = 8, + .pad_bits = 24, + .val_bits = 
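The SMI accessors above fold the 8-bit switch register offset into an MDIO (phyad, regnum) pair: the top three offset bits join the PHY address, together with a read flag. A stand-alone sketch of that address split; the explicit five-bit mask models what actually fits in the MDIO frame's register-number field, whereas the driver simply passes the whole offset down:

#include <stdint.h>
#include <stdio.h>

#define READ_PHY_FLAG 0x10	/* bit 4 of the PHY address marks a read, per SMI_KSZ88XX_READ_PHY */

/* Split an 8-bit switch register offset into the (phyad, regnum) pair put
 * on the MDIO bus.
 */
static void smi_address(uint8_t reg, int is_read, uint8_t *phyad, uint8_t *regnum)
{
	*phyad = (reg & 0xE0) >> 5;
	if (is_read)
		*phyad |= READ_PHY_FLAG;
	*regnum = reg & 0x1F;
}

int main(void)
{
	uint8_t phyad, regnum;

	smi_address(0x6E, 1, &phyad, &regnum);
	printf("reg 0x6e -> phyad=0x%02x regnum=0x%02x\n", phyad, regnum);
	return 0;
}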
16, + .cache_type = REGCACHE_NONE, + .use_single_read = 1, + .lock = ksz_regmap_lock, + .unlock = ksz_regmap_unlock, + }, + { + .name = "#32", + .reg_bits = 8, + .pad_bits = 24, + .val_bits = 32, + .cache_type = REGCACHE_NONE, + .use_single_read = 1, + .lock = ksz_regmap_lock, + .unlock = ksz_regmap_unlock, + } +}; + +static int ksz8863_smi_probe(struct mdio_device *mdiodev) +{ + struct regmap_config rc; + struct ksz_device *dev; + struct ksz8 *ksz8; + int ret; + int i; + + ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL); + ksz8->priv = mdiodev; + + dev = ksz_switch_alloc(&mdiodev->dev, ksz8); + if (!dev) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ksz8863_regmap_config); i++) { + rc = ksz8863_regmap_config[i]; + rc.lock_arg = &dev->regmap_mutex; + dev->regmap[i] = devm_regmap_init(&mdiodev->dev, + &regmap_smi[i], dev, + &rc); + if (IS_ERR(dev->regmap[i])) { + ret = PTR_ERR(dev->regmap[i]); + dev_err(&mdiodev->dev, + "Failed to initialize regmap%i: %d\n", + ksz8863_regmap_config[i].val_bits, ret); + return ret; + } + } + + if (mdiodev->dev.platform_data) + dev->pdata = mdiodev->dev.platform_data; + + ret = ksz8_switch_register(dev); + + /* Main DSA driver may not be started yet. */ + if (ret) + return ret; + + dev_set_drvdata(&mdiodev->dev, dev); + + return 0; +} + +static void ksz8863_smi_remove(struct mdio_device *mdiodev) +{ + struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev); + + if (dev) + ksz_switch_remove(dev); +} + +static const struct of_device_id ksz8863_dt_ids[] = { + { .compatible = "microchip,ksz8863" }, + { .compatible = "microchip,ksz8873" }, + { }, +}; +MODULE_DEVICE_TABLE(of, ksz8863_dt_ids); + +static struct mdio_driver ksz8863_driver = { + .probe = ksz8863_smi_probe, + .remove = ksz8863_smi_remove, + .mdiodrv.driver = { + .name = "ksz8863-switch", + .of_match_table = ksz8863_dt_ids, + }, +}; + +mdio_module_driver(ksz8863_driver); + +MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>"); +MODULE_DESCRIPTION("Microchip KSZ8863 SMI Switch driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index f212775372ce..2e6bfd333f50 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -69,8 +69,9 @@ struct ksz_device { int cpu_ports; /* port bitmap can be cpu port */ int phy_port_cnt; int port_cnt; - int reg_mib_cnt; + u8 reg_mib_cnt; int mib_cnt; + const struct mib_names *mib_names; phy_interface_t compat_interface; u32 regs_size; bool phy_errata_9477; @@ -142,7 +143,7 @@ int ksz_switch_register(struct ksz_device *dev, const struct ksz_dev_ops *ops); void ksz_switch_remove(struct ksz_device *dev); -int ksz8795_switch_register(struct ksz_device *dev); +int ksz8_switch_register(struct ksz_device *dev); int ksz9477_switch_register(struct ksz_device *dev); void ksz_update_port_member(struct ksz_device *dev, int port); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 9871d7cff93a..96f7c9eede35 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -67,6 +67,11 @@ static const struct mt7530_mib_desc mt7530_mib[] = { MIB_DESC(1, 0xb8, "RxArlDrop"), }; +/* Since phy_device has not yet been created and + * phy_{read,write}_mmd_indirect is not available, we provide our own + * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers + * to complete this function.
+ */ static int core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad) { @@ -435,19 +440,13 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), TD_DM_DRVP(8) | TD_DM_DRVN(8)); - /* Setup core clock for MT7530 */ - /* Disable MT7530 core clock */ - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); + /* Disable MT7530 core and TRGMII Tx clocks */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, + REG_GSWCK_EN | REG_TRGMIICK_EN); - /* Disable PLL, since phy_device has not yet been created - * provided for phy_[read,write]_mmd_indirect is called, we - * provide our own core_write_mmd_indirect to complete this - * function. - */ - core_write_mmd_indirect(priv, - CORE_GSWPLL_GRP1, - MDIO_MMD_VEND2, - 0); + /* Setup core clock for MT7530 */ + /* Disable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, 0); /* Set core clock into 500Mhz */ core_write(priv, CORE_GSWPLL_GRP2, @@ -460,11 +459,7 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) RG_GSWPLL_POSDIV_200M(2) | RG_GSWPLL_FBKDIV_200M(32)); - /* Enable MT7530 core clock */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - /* Setup the MT7530 TRGMII Tx Clock */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); @@ -478,6 +473,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); + + /* Enable MT7530 core and TRGMII Tx clocks */ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN | REG_TRGMIICK_EN); @@ -997,8 +994,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) mt7530_write(priv, MT7530_PVC_P(port), PORT_SPEC_TAG); - /* Unknown multicast frame forwarding to the cpu port */ - mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); + /* Disable flooding by default */ + mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK, + BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port))); /* Set CPU port number */ if (priv->id == ID_MT7621) @@ -1136,6 +1134,56 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) } static int +mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int +mt7530_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct mt7530_priv *priv = ds->priv; + + if (flags.mask & BR_LEARNING) + mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS, + flags.val & BR_LEARNING ? 0 : SA_DIS); + + if (flags.mask & BR_FLOOD) + mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)), + flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0); + + if (flags.mask & BR_MCAST_FLOOD) + mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), + flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0); + + if (flags.mask & BR_BCAST_FLOOD) + mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)), + flags.val & BR_BCAST_FLOOD ? 
BC_FFP(BIT(port)) : 0); + + return 0; +} + +static int +mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, + struct netlink_ext_ack *extack) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), + mrouter ? UNM_FFP(BIT(port)) : 0); + + return 0; +} + +static int mt7530_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { @@ -1347,6 +1395,59 @@ err: } static int +mt7530_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mt7530_priv *priv = ds->priv; + const u8 *addr = mdb->addr; + u16 vid = mdb->vid; + u8 port_mask = 0; + int ret; + + mutex_lock(&priv->reg_mutex); + + mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); + if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) + port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) + & PORT_MAP_MASK; + + port_mask |= BIT(port); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int +mt7530_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mt7530_priv *priv = ds->priv; + const u8 *addr = mdb->addr; + u16 vid = mdb->vid; + u8 port_mask = 0; + int ret; + + mutex_lock(&priv->reg_mutex); + + mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); + if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) + port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) + & PORT_MAP_MASK; + + port_mask &= ~BIT(port); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, + port_mask ? STATIC_ENT : STATIC_EMP); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) { struct mt7530_dummy_poll p; @@ -1818,9 +1919,12 @@ mt7530_setup(struct dsa_switch *ds) ret = mt753x_cpu_port_enable(ds, i); if (ret) return ret; - } else + } else { mt7530_port_disable(ds, i); + /* Disable learning by default on all user ports */ + mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + } /* Enable consistent egress tag */ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); @@ -1982,9 +2086,13 @@ mt7531_setup(struct dsa_switch *ds) ret = mt753x_cpu_port_enable(ds, i); if (ret) return ret; - } else + } else { mt7530_port_disable(ds, i); + /* Disable learning by default on all user ports */ + mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + } + /* Enable consistent egress tag */ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); @@ -2462,6 +2570,17 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, mcr |= PMCR_RX_FC_EN; } + if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) { + switch (speed) { + case SPEED_1000: + mcr |= PMCR_FORCE_EEE1G; + break; + case SPEED_100: + mcr |= PMCR_FORCE_EEE100; + break; + } + } + mt7530_set(priv, MT7530_PMCR_P(port), mcr); } @@ -2692,6 +2811,36 @@ mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return priv->info->phy_write(ds, port, regnum, val); } +static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mt7530_priv *priv = ds->priv; + u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); + + e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); + e->tx_lpi_timer = GET_LPI_THRESH(eeecr); + + return 0; +} + +static int mt753x_set_mac_eee(struct 
dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mt7530_priv *priv = ds->priv; + u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; + + if (e->tx_lpi_timer > 0xFFF) + return -EINVAL; + + set = SET_LPI_THRESH(e->tx_lpi_timer); + if (!e->tx_lpi_enabled) + /* Force LPI Mode without a delay */ + set |= LPI_MODE_EN; + mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); + + return 0; +} + static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt753x_setup, @@ -2706,11 +2855,16 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_change_mtu = mt7530_port_change_mtu, .port_max_mtu = mt7530_port_max_mtu, .port_stp_state_set = mt7530_stp_state_set, + .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, + .port_bridge_flags = mt7530_port_bridge_flags, + .port_set_mrouter = mt7530_port_set_mrouter, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, .port_fdb_add = mt7530_port_fdb_add, .port_fdb_del = mt7530_port_fdb_del, .port_fdb_dump = mt7530_port_fdb_dump, + .port_mdb_add = mt7530_port_mdb_add, + .port_mdb_del = mt7530_port_mdb_del, .port_vlan_filtering = mt7530_port_vlan_filtering, .port_vlan_add = mt7530_port_vlan_add, .port_vlan_del = mt7530_port_vlan_del, @@ -2722,6 +2876,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .phylink_mac_an_restart = mt753x_phylink_mac_an_restart, .phylink_mac_link_down = mt753x_phylink_mac_link_down, .phylink_mac_link_up = mt753x_phylink_mac_link_up, + .get_mac_eee = mt753x_get_mac_eee, + .set_mac_eee = mt753x_set_mac_eee, }; static const struct mt753x_info mt753x_table[] = { diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 64a9bb377e15..0204da486f3a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -34,6 +34,7 @@ enum mt753x_id { /* Registers to mac forward control for unknown frames */ #define MT7530_MFC 0x10 #define BC_FFP(x) (((x) & 0xff) << 24) +#define BC_FFP_MASK BC_FFP(~0) #define UNM_FFP(x) (((x) & 0xff) << 16) #define UNM_FFP_MASK UNM_FFP(~0) #define UNU_FFP(x) (((x) & 0xff) << 8) @@ -256,6 +257,8 @@ enum mt7530_vlan_port_attr { #define PMCR_RX_EN BIT(13) #define PMCR_BACKOFF_EN BIT(9) #define PMCR_BACKPR_EN BIT(8) +#define PMCR_FORCE_EEE1G BIT(7) +#define PMCR_FORCE_EEE100 BIT(6) #define PMCR_TX_FC_EN BIT(5) #define PMCR_RX_FC_EN BIT(4) #define PMCR_FORCE_SPEED_1000 BIT(3) @@ -280,7 +283,8 @@ enum mt7530_vlan_port_attr { #define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \ - PMCR_FORCE_FDX | PMCR_FORCE_LNK) + PMCR_FORCE_FDX | PMCR_FORCE_LNK | \ + PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100) #define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \ @@ -289,6 +293,15 @@ enum mt7530_vlan_port_attr { PMCR_FORCE_SPEED_1000 | \ PMCR_FORCE_FDX | PMCR_FORCE_LNK) +#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100) +#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24) +#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16) +#define LPI_THRESH_MASK GENMASK(15, 4) +#define LPI_THRESH_SHT 4 +#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK) +#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT) +#define LPI_MODE_EN BIT(0) + #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) #define PMSR_EEE1G BIT(7) #define PMSR_EEE100M BIT(6) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c 
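The mt753x_set_mac_eee() hunk above rejects tx_lpi_timer values above 0xFFF because the LPI threshold occupies bits 15:4 of MT7530_PMEEECR_P, per the SET_LPI_THRESH()/GET_LPI_THRESH() macros added in the mt7530.h hunk. The stand-alone sketch below is illustrative only and is not part of the patch; it reuses the same mask and shift to demonstrate the field packing.

/* Illustrative sketch of the MT7530 LPI threshold field packing; the mask
 * and shift mirror the mt7530.h additions, everything else is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define LPI_THRESH_SHT    4
#define LPI_THRESH_MASK   0xFFF0u	/* GENMASK(15, 4) */
#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)

int main(void)
{
	uint32_t timer = 0x123;		/* must fit in 12 bits, i.e. <= 0xFFF */
	uint32_t field = SET_LPI_THRESH(timer);

	printf("register field 0x%04x, read back 0x%03x\n",
	       field, GET_LPI_THRESH(field));
	return 0;
}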
index e08bf9377140..eca285aaf72f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -485,12 +485,12 @@ static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port, struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; - u8 lane; + int lane; int err; mv88e6xxx_reg_lock(chip); lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane && chip->info->ops->serdes_pcs_get_state) + if (lane >= 0 && chip->info->ops->serdes_pcs_get_state) err = chip->info->ops->serdes_pcs_get_state(chip, port, lane, state); else @@ -506,11 +506,11 @@ static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, const unsigned long *advertise) { const struct mv88e6xxx_ops *ops = chip->info->ops; - u8 lane; + int lane; if (ops->serdes_pcs_config) { lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) return ops->serdes_pcs_config(chip, port, lane, mode, interface, advertise); } @@ -523,14 +523,14 @@ static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port) struct mv88e6xxx_chip *chip = ds->priv; const struct mv88e6xxx_ops *ops; int err = 0; - u8 lane; + int lane; ops = chip->info->ops; if (ops->serdes_pcs_an_restart) { mv88e6xxx_reg_lock(chip); lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) err = ops->serdes_pcs_an_restart(chip, port, lane); mv88e6xxx_reg_unlock(chip); @@ -544,11 +544,11 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, int speed, int duplex) { const struct mv88e6xxx_ops *ops = chip->info->ops; - u8 lane; + int lane; if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) { lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) return ops->serdes_pcs_link_up(chip, port, lane, speed, duplex); } @@ -635,6 +635,29 @@ static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port, mv88e6390_phylink_validate(chip, port, mask, state); } +static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (port == 0 || port == 9 || port == 10) { + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseKR_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 5000baseT_Full); + phylink_set(mask, 2500baseX_Full); + phylink_set(mask, 2500baseT_Full); + } + + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + static void mv88e6xxx_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) @@ -1417,7 +1440,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port) * the special "LAG device" in the PVT, using * the LAG ID as the port number. */ - dev = MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK; + dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK; port = dsa_lag_id(dst, dp->lag_dev); } } @@ -1456,6 +1479,13 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) struct mv88e6xxx_chip *chip = ds->priv; int err; + if (dsa_to_port(ds, port)->lag_dev) + /* Hardware is incapable of fast-aging a LAG through a + * regular ATU move operation. Until we have something + * more fancy in place this is a no-op. 
+ */ + return; + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g1_atu_remove(chip, 0, port, false); mv88e6xxx_reg_unlock(chip); @@ -1472,13 +1502,54 @@ static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_vtu_flush(chip); } -static int mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_entry *entry) { + int err; + if (!chip->info->ops->vtu_getnext) return -EOPNOTSUPP; - return chip->info->ops->vtu_getnext(chip, entry); + entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip); + entry->valid = false; + + err = chip->info->ops->vtu_getnext(chip, entry); + + if (entry->vid != vid) + entry->valid = false; + + return err; +} + +static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, + int (*cb)(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *priv), + void *priv) +{ + struct mv88e6xxx_vtu_entry entry = { + .vid = mv88e6xxx_max_vid(chip), + .valid = false, + }; + int err; + + if (!chip->info->ops->vtu_getnext) + return -EOPNOTSUPP; + + do { + err = chip->info->ops->vtu_getnext(chip, &entry); + if (err) + return err; + + if (!entry.valid) + break; + + err = cb(chip, &entry, priv); + if (err) + return err; + } while (entry.vid < mv88e6xxx_max_vid(chip)); + + return 0; } static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, @@ -1490,9 +1561,18 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, return chip->info->ops->vtu_loadpurge(chip, entry); } +static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *_fid_bitmap) +{ + unsigned long *fid_bitmap = _fid_bitmap; + + set_bit(entry->fid, fid_bitmap); + return 0; +} + int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) { - struct mv88e6xxx_vtu_entry vlan; int i, err; u16 fid; @@ -1508,21 +1588,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) } /* Set every FID bit used by the VLAN entries */ - vlan.vid = mv88e6xxx_max_vid(chip); - vlan.valid = false; - - do { - err = mv88e6xxx_vtu_getnext(chip, &vlan); - if (err) - return err; - - if (!vlan.valid) - break; - - set_bit(vlan.fid, fid_bitmap); - } while (vlan.vid < mv88e6xxx_max_vid(chip)); - - return 0; + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap); } static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) @@ -1559,19 +1625,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) return 0; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; if (!vlan.valid) return 0; - if (vlan.vid != vid) - return 0; - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) continue; @@ -1653,15 +1713,12 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (err) return err; } else { - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; /* switchdev expects -EOPNOTSUPP to honor software VLANs */ - if (vlan.vid != vid || !vlan.valid) + if (!vlan.valid) return -EOPNOTSUPP; fid = vlan.fid; @@ -1911,8 +1968,10 @@ static int mv88e6xxx_set_rxnfc(struct dsa_switch *ds, int port, static int 
mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port, u16 vid) { - const char broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u8 state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC; + u8 broadcast[ETH_ALEN]; + + eth_broadcast_addr(broadcast); return mv88e6xxx_port_db_load_purge(chip, port, broadcast, vid, state); } @@ -1923,6 +1982,19 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid) int err; for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + struct dsa_port *dp = dsa_to_port(chip->ds, port); + struct net_device *brport; + + if (dsa_is_unused_port(chip->ds, port)) + continue; + + brport = dsa_port_to_bridge_port(dp); + if (brport && !br_port_flag_is_set(brport, BR_BCAST_FLOOD)) + /* Skip bridged user ports where broadcast + * flooding is disabled. + */ + continue; + err = mv88e6xxx_port_add_broadcast(chip, port, vid); if (err) return err; @@ -1931,6 +2003,53 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid) return 0; } +struct mv88e6xxx_port_broadcast_sync_ctx { + int port; + bool flood; +}; + +static int +mv88e6xxx_port_broadcast_sync_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *vlan, + void *_ctx) +{ + struct mv88e6xxx_port_broadcast_sync_ctx *ctx = _ctx; + u8 broadcast[ETH_ALEN]; + u8 state; + + if (ctx->flood) + state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC; + else + state = MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED; + + eth_broadcast_addr(broadcast); + + return mv88e6xxx_port_db_load_purge(chip, ctx->port, broadcast, + vlan->vid, state); +} + +static int mv88e6xxx_port_broadcast_sync(struct mv88e6xxx_chip *chip, int port, + bool flood) +{ + struct mv88e6xxx_port_broadcast_sync_ctx ctx = { + .port = port, + .flood = flood, + }; + struct mv88e6xxx_vtu_entry vid0 = { + .vid = 0, + }; + int err; + + /* Update the port's private database... */ + err = mv88e6xxx_port_broadcast_sync_vlan(chip, &vid0, &ctx); + if (err) + return err; + + /* ...and the database for all VLANs. */ + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_broadcast_sync_vlan, + &ctx); +} + static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, u16 vid, u8 member, bool warn) { @@ -1938,14 +2057,11 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, struct mv88e6xxx_vtu_entry vlan; int i, err; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; - if (vlan.vid != vid || !vlan.valid) { + if (!vlan.valid) { memset(&vlan, 0, sizeof(vlan)); err = mv88e6xxx_atu_new(chip, &vlan.fid); @@ -2041,17 +2157,14 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip, if (!vid) return -EOPNOTSUPP; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; /* If the VLAN doesn't exist in hardware or the port isn't a member, * tell switchdev that this VLAN is likely handled in software. 
*/ - if (vlan.vid != vid || !vlan.valid || + if (!vlan.valid || vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) return -EOPNOTSUPP; @@ -2168,10 +2281,30 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, return err; } +struct mv88e6xxx_port_db_dump_vlan_ctx { + int port; + dsa_fdb_dump_cb_t *cb; + void *data; +}; + +static int mv88e6xxx_port_db_dump_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *_data) +{ + struct mv88e6xxx_port_db_dump_vlan_ctx *ctx = _data; + + return mv88e6xxx_port_db_dump_fid(chip, entry->fid, entry->vid, + ctx->port, ctx->cb, ctx->data); +} + static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, dsa_fdb_dump_cb_t *cb, void *data) { - struct mv88e6xxx_vtu_entry vlan; + struct mv88e6xxx_port_db_dump_vlan_ctx ctx = { + .port = port, + .cb = cb, + .data = data, + }; u16 fid; int err; @@ -2184,25 +2317,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, if (err) return err; - /* Dump VLANs' Filtering Information Databases */ - vlan.vid = mv88e6xxx_max_vid(chip); - vlan.valid = false; - - do { - err = mv88e6xxx_vtu_getnext(chip, &vlan); - if (err) - return err; - - if (!vlan.valid) - break; - - err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port, - cb, data); - if (err) - return err; - } while (vlan.vid < mv88e6xxx_max_vid(chip)); - - return err; + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_db_dump_vlan, &ctx); } static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, @@ -2416,10 +2531,10 @@ static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port) return mv88e6xxx_set_port_mode_normal(chip, port); /* Setup CPU port mode depending on its supported tag format */ - if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA) + if (chip->tag_protocol == DSA_TAG_PROTO_DSA) return mv88e6xxx_set_port_mode_dsa(chip, port); - if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA) + if (chip->tag_protocol == DSA_TAG_PROTO_EDSA) return mv88e6xxx_set_port_mode_edsa(chip, port); return -EINVAL; @@ -2434,19 +2549,15 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port) static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port) { - struct dsa_switch *ds = chip->ds; - bool flood; int err; - /* Upstream ports flood frames with unknown unicast or multicast DA */ - flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port); if (chip->info->ops->port_set_ucast_flood) { - err = chip->info->ops->port_set_ucast_flood(chip, port, flood); + err = chip->info->ops->port_set_ucast_flood(chip, port, true); if (err) return err; } if (chip->info->ops->port_set_mcast_flood) { - err = chip->info->ops->port_set_mcast_flood(chip, port, flood); + err = chip->info->ops->port_set_mcast_flood(chip, port, true); if (err) return err; } @@ -2460,11 +2571,11 @@ static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id) struct mv88e6xxx_chip *chip = mvp->chip; irqreturn_t ret = IRQ_NONE; int port = mvp->port; - u8 lane; + int lane; mv88e6xxx_reg_lock(chip); lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) ret = mv88e6xxx_serdes_irq_status(chip, port, lane); mv88e6xxx_reg_unlock(chip); @@ -2472,7 +2583,7 @@ static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id) } static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { struct mv88e6xxx_port *dev_id = &chip->ports[port]; unsigned int irq; @@ -2501,7 +2612,7 @@ 
static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port, } static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { struct mv88e6xxx_port *dev_id = &chip->ports[port]; unsigned int irq = dev_id->serdes_irq; @@ -2526,11 +2637,11 @@ static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port, static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { - u8 lane; + int lane; int err; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (!lane) + if (lane < 0) return 0; if (on) { @@ -2550,6 +2661,27 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, return err; } +static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) +{ + int err; + + if (!chip->info->ops->set_egress_port) + return -EOPNOTSUPP; + + err = chip->info->ops->set_egress_port(chip, direction, port); + if (err) + return err; + + if (direction == MV88E6XXX_EGRESS_DIR_INGRESS) + chip->ingress_dest_port = port; + else + chip->egress_dest_port = port; + + return 0; +} + static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) { struct dsa_switch *ds = chip->ds; @@ -2572,19 +2704,17 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) return err; } - if (chip->info->ops->set_egress_port) { - err = chip->info->ops->set_egress_port(chip, + err = mv88e6xxx_set_egress_port(chip, MV88E6XXX_EGRESS_DIR_INGRESS, upstream_port); - if (err) - return err; + if (err && err != -EOPNOTSUPP) + return err; - err = chip->info->ops->set_egress_port(chip, + err = mv88e6xxx_set_egress_port(chip, MV88E6XXX_EGRESS_DIR_EGRESS, upstream_port); - if (err) - return err; - } + if (err && err != -EOPNOTSUPP) + return err; } return 0; @@ -2670,15 +2800,20 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return err; } - /* Port Association Vector: when learning source addresses - * of packets, add the address to the address database using - * a port bitmap that has only the bit for this port set and - * the other bits clear. + /* Port Association Vector: disable automatic address learning + * on all user ports since they start out in standalone + * mode. When joining a bridge, learning will be configured to + * match the bridge port settings. Enable learning on all + * DSA/CPU ports. NOTE: FROM_CPU frames always bypass the + * learning process. + * + * Disable HoldAt1, IntOnAgeOut, LockedPort, IgnoreWrongData, + * and RefreshLocked. I.e. setup standard automatic learning. 
*/ - reg = 1 << port; - /* Disable learning for CPU port */ - if (dsa_is_cpu_port(ds, port)) + if (dsa_is_user_port(ds, port)) reg = 0; + else + reg = 1 << port; err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg); @@ -3030,6 +3165,7 @@ out_resources: static const u16 family_prod_id_table[] = { [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341, [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390, + [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X, }; static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) @@ -4566,6 +4702,70 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .phylink_validate = mv88e6390x_phylink_validate, }; +static const struct mv88e6xxx_ops mv88e6393x_ops = { + /* MV88E6XXX_FAMILY_6393 */ + .setup_errata = mv88e6393x_serdes_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed_duplex = mv88e6393x_port_set_speed_duplex, + .port_max_speed_mode = mv88e6393x_port_max_speed_mode, + .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6393x_port_set_policy, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, + .port_set_ether_type = mv88e6393x_port_set_ether_type, + .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_limit = mv88e6390_port_pause_limit, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6393x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, + .port_set_upstream_port = mv88e6393x_port_set_upstream_port, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + /* .set_cpu_port is missing because this family does not support a global + * CPU port, only per port CPU port which is set via + * .port_set_upstream_port method. 
+ */ + .set_egress_port = mv88e6393x_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, + .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, + .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, + .serdes_power = mv88e6393x_serdes_power, + .serdes_get_lane = mv88e6393x_serdes_get_lane, + .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state, + .serdes_pcs_config = mv88e6390_serdes_pcs_config, + .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, + .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6393x_serdes_irq_enable, + .serdes_irq_status = mv88e6393x_serdes_irq_status, + /* TODO: serdes stats */ + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6393x_phylink_validate, +}; + static const struct mv88e6xxx_info mv88e6xxx_table[] = { [MV88E6085] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085, @@ -4586,7 +4786,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ops = &mv88e6085_ops, }, @@ -4607,7 +4806,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g1_irqs = 8, .atu_move_port_mask = 0xf, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ops = &mv88e6095_ops, }, @@ -4630,7 +4828,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6097_ops, }, @@ -4653,7 +4851,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6123_ops, }, @@ -4674,7 +4872,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g1_irqs = 9, .atu_move_port_mask = 0xf, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ops = &mv88e6131_ops, }, @@ -4698,7 +4895,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6141_ops, }, @@ -4721,7 +4918,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, .ops = &mv88e6161_ops, }, @@ -4745,7 +4942,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ptp_support = true, .ops = &mv88e6165_ops, }, @@ -4769,7 +4965,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6171_ops, }, @@ -4793,7 +4989,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6172_ops, }, @@ 
-4816,7 +5012,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6175_ops, }, @@ -4840,7 +5036,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6176_ops, }, @@ -4861,7 +5057,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g1_irqs = 8, .atu_move_port_mask = 0xf, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6185_ops, }, @@ -4879,7 +5075,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, - .tag_protocol = DSA_TAG_PROTO_DSA, .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, @@ -4909,7 +5104,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ops = &mv88e6190x_ops, }, @@ -4932,11 +5126,54 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ptp_support = true, .ops = &mv88e6191_ops, }, + [MV88E6191X] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191X, + .family = MV88E6XXX_FAMILY_6393, + .name = "Marvell 88E6191X", + .num_databases = 4096, + .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, + + [MV88E6193X] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6193X, + .family = MV88E6XXX_FAMILY_6393, + .name = "Marvell 88E6193X", + .num_databases = 4096, + .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, + [MV88E6220] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220, .family = MV88E6XXX_FAMILY_6250, @@ -4959,7 +5196,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .dual_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ptp_support = true, .ops = &mv88e6250_ops, }, @@ -4984,7 +5220,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, .ops = &mv88e6240_ops, }, @@ -5006,7 +5242,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .dual_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ptp_support = true, .ops = &mv88e6250_ops, }, @@ -5030,7 +5265,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, .ptp_support = true, .ops = &mv88e6290_ops, }, @@ -5055,7 +5289,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] 
= { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, .ops = &mv88e6320_ops, }, @@ -5079,7 +5313,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, .ops = &mv88e6321_ops, }, @@ -5104,7 +5338,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, .ops = &mv88e6341_ops, }, @@ -5128,7 +5362,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6350_ops, }, @@ -5151,7 +5385,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ops = &mv88e6351_ops, }, @@ -5175,7 +5409,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_EDSA, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, .ops = &mv88e6352_ops, }, @@ -5199,7 +5433,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, + .edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED, .ptp_support = true, .ops = &mv88e6390_ops, }, @@ -5223,10 +5457,32 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, - .tag_protocol = DSA_TAG_PROTO_DSA, + .edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED, .ptp_support = true, .ops = &mv88e6390x_ops, }, + + [MV88E6393X] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X, + .family = MV88E6XXX_FAMILY_6393, + .name = "Marvell 88E6393X", + .num_databases = 4096, + .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, }; static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num) @@ -5292,7 +5548,45 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, { struct mv88e6xxx_chip *chip = ds->priv; - return chip->info->tag_protocol; + return chip->tag_protocol; +} + +static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port, + enum dsa_tag_protocol proto) +{ + struct mv88e6xxx_chip *chip = ds->priv; + enum dsa_tag_protocol old_protocol; + int err; + + switch (proto) { + case DSA_TAG_PROTO_EDSA: + switch (chip->info->edsa_support) { + case MV88E6XXX_EDSA_UNSUPPORTED: + return -EPROTONOSUPPORT; + case MV88E6XXX_EDSA_UNDOCUMENTED: + dev_warn(chip->dev, "Relying on undocumented EDSA tagging behavior\n"); + fallthrough; + case MV88E6XXX_EDSA_SUPPORTED: + break; + } + break; + case DSA_TAG_PROTO_DSA: + break; + default: + return -EPROTONOSUPPORT; + } + + old_protocol = chip->tag_protocol; + chip->tag_protocol = proto; + + mv88e6xxx_reg_lock(chip); + err = 
mv88e6xxx_setup_port_mode(chip, port); + mv88e6xxx_reg_unlock(chip); + + if (err) + chip->tag_protocol = old_protocol; + + return err; } static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, @@ -5334,9 +5628,6 @@ static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port, int i; int err; - if (!chip->info->ops->set_egress_port) - return -EOPNOTSUPP; - mutex_lock(&chip->reg_lock); if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) != mirror->to_local_port) { @@ -5351,9 +5642,8 @@ static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port, goto out; } - err = chip->info->ops->set_egress_port(chip, - direction, - mirror->to_local_port); + err = mv88e6xxx_set_egress_port(chip, direction, + mirror->to_local_port); if (err) goto out; } @@ -5386,10 +5676,8 @@ static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port, /* Reset egress port when no other mirror is active */ if (!other_mirrors) { - if (chip->info->ops->set_egress_port(chip, - direction, - dsa_upstream_port(ds, - port))) + if (mv88e6xxx_set_egress_port(chip, direction, + dsa_upstream_port(ds, port))) dev_err(ds->dev, "failed to set egress port\n"); } @@ -5403,7 +5691,8 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; const struct mv88e6xxx_ops *ops; - if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD)) + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) return -EINVAL; ops = chip->info->ops; @@ -5422,10 +5711,23 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct mv88e6xxx_chip *chip = ds->priv; + bool do_fast_age = false; int err = -EOPNOTSUPP; mv88e6xxx_reg_lock(chip); + if (flags.mask & BR_LEARNING) { + bool learning = !!(flags.val & BR_LEARNING); + u16 pav = learning ? 
(1 << port) : 0; + + err = mv88e6xxx_port_set_assoc_vector(chip, port, pav); + if (err) + goto out; + + if (!learning) + do_fast_age = true; + } + if (flags.mask & BR_FLOOD) { bool unicast = !!(flags.val & BR_FLOOD); @@ -5444,9 +5746,20 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, goto out; } + if (flags.mask & BR_BCAST_FLOOD) { + bool broadcast = !!(flags.val & BR_BCAST_FLOOD); + + err = mv88e6xxx_port_broadcast_sync(chip, port, broadcast); + if (err) + goto out; + } + out: mv88e6xxx_reg_unlock(chip); + if (do_fast_age) + mv88e6xxx_port_fast_age(ds, port); + return err; } @@ -5738,6 +6051,7 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index, static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_tag_protocol = mv88e6xxx_get_tag_protocol, + .change_tag_protocol = mv88e6xxx_change_tag_protocol, .setup = mv88e6xxx_setup, .teardown = mv88e6xxx_teardown, .phylink_validate = mv88e6xxx_validate, @@ -5918,6 +6232,11 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out; + if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED) + chip->tag_protocol = DSA_TAG_PROTO_EDSA; + else + chip->tag_protocol = DSA_TAG_PROTO_DSA; + mv88e6xxx_phy_init(chip); if (chip->info->ops->get_eeprom) { diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index a57c8886f3ac..675b1f3e43b7 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -23,6 +23,8 @@ /* PVT limits for 4-bit port and 5-bit switch */ #define MV88E6XXX_MAX_PVT_SWITCHES 32 #define MV88E6XXX_MAX_PVT_PORTS 16 +#define MV88E6XXX_MAX_PVT_ENTRIES \ + (MV88E6XXX_MAX_PVT_SWITCHES * MV88E6XXX_MAX_PVT_PORTS) #define MV88E6XXX_MAX_GPIO 16 @@ -63,6 +65,8 @@ enum mv88e6xxx_model { MV88E6190, MV88E6190X, MV88E6191, + MV88E6191X, + MV88E6193X, MV88E6220, MV88E6240, MV88E6250, @@ -75,6 +79,7 @@ enum mv88e6xxx_model { MV88E6352, MV88E6390, MV88E6390X, + MV88E6393X, }; enum mv88e6xxx_family { @@ -90,6 +95,23 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ + MV88E6XXX_FAMILY_6393, /* 6191X 6193X 6393X */ +}; + +/** + * enum mv88e6xxx_edsa_support - Ethertype DSA tag support level + * @MV88E6XXX_EDSA_UNSUPPORTED: Device has no support for EDSA tags + * @MV88E6XXX_EDSA_UNDOCUMENTED: Documentation indicates that + * egressing FORWARD frames with an EDSA + * tag is reserved for future use, but + * empirical data shows that this mode + * is supported. + * @MV88E6XXX_EDSA_SUPPORTED: EDSA tags are fully supported. + */ +enum mv88e6xxx_edsa_support { + MV88E6XXX_EDSA_UNSUPPORTED = 0, + MV88E6XXX_EDSA_UNDOCUMENTED, + MV88E6XXX_EDSA_SUPPORTED, }; struct mv88e6xxx_ops; @@ -129,7 +151,7 @@ struct mv88e6xxx_info { */ bool dual_chip; - enum dsa_tag_protocol tag_protocol; + enum mv88e6xxx_edsa_support edsa_support; /* Mask for FromPort and ToPort value of PortVec used in ATU Move * operation. 0 means that the ATU Move operation is not supported. 
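The chip.h changes above replace the fixed per-chip tag_protocol with an edsa_support capability plus a runtime chip->tag_protocol, which mv88e6xxx_change_tag_protocol() checks before reprogramming the CPU port mode. The stand-alone sketch below is illustrative only; the enum values and helper names are simplified stand-ins, and it mirrors the decision points visible in the chip.c hunks (plain DSA always allowed, EDSA refused when unsupported, warned about when undocumented).

/* Hypothetical sketch of the EDSA tagging policy; names are simplified
 * stand-ins for the kernel's mv88e6xxx definitions.
 */
#include <stdio.h>

enum edsa_support { EDSA_UNSUPPORTED, EDSA_UNDOCUMENTED, EDSA_SUPPORTED };
enum tag_proto { TAG_DSA, TAG_EDSA };

/* Probe-time default: use EDSA only when it is fully supported. */
static enum tag_proto default_proto(enum edsa_support cap)
{
	return cap == EDSA_SUPPORTED ? TAG_EDSA : TAG_DSA;
}

/* Runtime change request: 0 on success, -1 stands in for -EPROTONOSUPPORT. */
static int can_switch_to(enum edsa_support cap, enum tag_proto want)
{
	if (want == TAG_DSA)
		return 0;
	if (cap == EDSA_UNSUPPORTED)
		return -1;
	if (cap == EDSA_UNDOCUMENTED)
		fprintf(stderr, "relying on undocumented EDSA behaviour\n");
	return 0;
}

int main(void)
{
	/* A 6390-class switch defaults to DSA but may be switched to EDSA. */
	printf("default: %s\n",
	       default_proto(EDSA_UNDOCUMENTED) == TAG_EDSA ? "EDSA" : "DSA");
	printf("switch to EDSA: %d\n",
	       can_switch_to(EDSA_UNDOCUMENTED, TAG_EDSA));
	return 0;
}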
@@ -246,6 +268,7 @@ enum mv88e6xxx_region_id { MV88E6XXX_REGION_GLOBAL2, MV88E6XXX_REGION_ATU, MV88E6XXX_REGION_VTU, + MV88E6XXX_REGION_PVT, _MV88E6XXX_REGION_MAX, }; @@ -257,6 +280,9 @@ struct mv88e6xxx_region_priv { struct mv88e6xxx_chip { const struct mv88e6xxx_info *info; + /* Currently configured tagging protocol */ + enum dsa_tag_protocol tag_protocol; + /* The dsa_switch this private structure is related to */ struct dsa_switch *ds; @@ -513,30 +539,30 @@ struct mv88e6xxx_ops { int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); /* Power on/off a SERDES interface */ - int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, u8 lane, + int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane, bool up); /* SERDES lane mapping */ - u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port); + int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port); int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise); int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex); + int lane, int speed, int duplex); /* SERDES interrupt handling */ unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip, int port); - int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, u8 lane, + int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); /* Statistics from the SERDES interface */ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c index 21953d6d484c..0c0f5ea6680c 100644 --- a/drivers/net/dsa/mv88e6xxx/devlink.c +++ b/drivers/net/dsa/mv88e6xxx/devlink.c @@ -503,6 +503,44 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl, return 0; } +static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct mv88e6xxx_chip *chip = ds->priv; + int dev, port, err; + u16 *pvt, *cur; + + pvt = kcalloc(MV88E6XXX_MAX_PVT_ENTRIES, sizeof(*pvt), GFP_KERNEL); + if (!pvt) + return -ENOMEM; + + mv88e6xxx_reg_lock(chip); + + cur = pvt; + for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; dev++) { + for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; port++) { + err = mv88e6xxx_g2_pvt_read(chip, dev, port, cur); + if (err) + break; + + cur++; + } + } + + mv88e6xxx_reg_unlock(chip); + + if (err) { + kfree(pvt); + return err; + } + + *data = (u8 *)pvt; + return 0; +} + static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port, const struct devlink_port_region_ops *ops, struct netlink_ext_ack *extack, @@ -567,6 +605,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = { .destructor = kfree, }; +static struct devlink_region_ops mv88e6xxx_region_pvt_ops = { + .name = "pvt", + .snapshot = mv88e6xxx_region_pvt_snapshot, + .destructor = kfree, +}; + static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = { .name = "port", .snapshot = mv88e6xxx_region_port_snapshot, @@ 
-576,6 +620,8 @@ static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = { struct mv88e6xxx_region { struct devlink_region_ops *ops; u64 size; + + bool (*cond)(struct mv88e6xxx_chip *chip); }; static struct mv88e6xxx_region mv88e6xxx_regions[] = { @@ -594,6 +640,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = { .ops = &mv88e6xxx_region_vtu_ops /* calculated at runtime */ }, + [MV88E6XXX_REGION_PVT] = { + .ops = &mv88e6xxx_region_pvt_ops, + .size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16), + .cond = mv88e6xxx_has_pvt, + }, }; static void @@ -663,6 +714,7 @@ out: static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds, struct mv88e6xxx_chip *chip) { + bool (*cond)(struct mv88e6xxx_chip *chip); struct devlink_region_ops *ops; struct devlink_region *region; u64 size; @@ -671,6 +723,10 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds, for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) { ops = mv88e6xxx_regions[i].ops; size = mv88e6xxx_regions[i].size; + cond = mv88e6xxx_regions[i].cond; + + if (cond && !cond(chip)) + continue; switch (i) { case MV88E6XXX_REGION_ATU: @@ -678,7 +734,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds, sizeof(struct mv88e6xxx_devlink_atu_entry); break; case MV88E6XXX_REGION_VTU: - size = mv88e6xxx_max_vid(chip) * + size = (mv88e6xxx_max_vid(chip) + 1) * sizeof(struct mv88e6xxx_devlink_vtu_entry); break; } diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 33d443a37efc..815b0f681d69 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -315,7 +315,6 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, enum mv88e6xxx_egress_direction direction, int port) { - int *dest_port_chip; u16 reg; int err; @@ -325,13 +324,11 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, switch (direction) { case MV88E6XXX_EGRESS_DIR_INGRESS: - dest_port_chip = &chip->ingress_dest_port; reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK); break; case MV88E6XXX_EGRESS_DIR_EGRESS: - dest_port_chip = &chip->egress_dest_port; reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); @@ -340,11 +337,7 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, return -EINVAL; } - err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg); - if (!err) - *dest_port_chip = port; - - return err; + return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg); } /* Older generations also call this the ARP destination. 
It has been @@ -380,28 +373,20 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, enum mv88e6xxx_egress_direction direction, int port) { - int *dest_port_chip; u16 ptr; - int err; switch (direction) { case MV88E6XXX_EGRESS_DIR_INGRESS: - dest_port_chip = &chip->ingress_dest_port; ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST; break; case MV88E6XXX_EGRESS_DIR_EGRESS: - dest_port_chip = &chip->egress_dest_port; ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST; break; default: return -EINVAL; } - err = mv88e6390_g1_monitor_write(chip, ptr, port); - if (!err) - *dest_port_chip = port; - - return err; + return mv88e6390_g1_monitor_write(chip, ptr, port); } int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 7c396964d0b2..4f3dbb015f77 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -22,6 +22,7 @@ #define MV88E6185_G1_STS_PPU_STATE_DISABLED 0x8000 #define MV88E6185_G1_STS_PPU_STATE_POLLING 0xc000 #define MV88E6XXX_G1_STS_INIT_READY 0x0800 +#define MV88E6393X_G1_STS_IRQ_DEVICE_2 9 #define MV88E6XXX_G1_STS_IRQ_AVB 8 #define MV88E6XXX_G1_STS_IRQ_DEVICE 7 #define MV88E6XXX_G1_STS_IRQ_STATS 6 @@ -59,6 +60,7 @@ #define MV88E6185_G1_CTL1_SCHED_PRIO 0x0800 #define MV88E6185_G1_CTL1_MAX_FRAME_1632 0x0400 #define MV88E6185_G1_CTL1_RELOAD_EEPROM 0x0200 +#define MV88E6393X_G1_CTL1_DEVICE2_EN 0x0200 #define MV88E6XXX_G1_CTL1_DEVICE_EN 0x0080 #define MV88E6XXX_G1_CTL1_STATS_DONE_EN 0x0040 #define MV88E6XXX_G1_CTL1_VTU_PROBLEM_EN 0x0020 diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index da8bac8813e1..fa65ecd9cb85 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -239,6 +239,23 @@ static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev, return mv88e6xxx_g2_pvt_op_wait(chip); } +int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev, + int src_port, u16 *data) +{ + int err; + + err = mv88e6xxx_g2_pvt_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_pvt_op(chip, src_dev, src_port, + MV88E6XXX_G2_PVT_ADDR_OP_READ); + if (err) + return err; + + return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_PVT_DATA, data); +} + int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, int src_port, u16 data) { diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 4127f82275ad..f3e27573a386 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -38,9 +38,15 @@ /* Offset 0x02: MGMT Enable Register 2x */ #define MV88E6XXX_G2_MGMT_EN_2X 0x02 +/* Offset 0x02: MAC LINK change IRQ Register for MV88E6393X */ +#define MV88E6393X_G2_MACLINK_INT_SRC 0x02 + /* Offset 0x03: MGMT Enable Register 0x */ #define MV88E6XXX_G2_MGMT_EN_0X 0x03 +/* Offset 0x03: MAC LINK change IRQ Mask Register for MV88E6393X */ +#define MV88E6393X_G2_MACLINK_INT_MASK 0x03 + /* Offset 0x04: Flow Control Delay Register */ #define MV88E6XXX_G2_FLOW_CTL 0x04 @@ -52,6 +58,8 @@ #define MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI 0x0080 #define MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU 0x0008 +#define MV88E6393X_G2_EGRESS_MONITOR_DEST 0x05 + /* Offset 0x06: Device Mapping Table Register */ #define MV88E6XXX_G2_DEVICE_MAPPING 0x06 #define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000 @@ -101,7 +109,7 @@ #define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000 #define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000 
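For the new devlink "pvt" region above, the cross-chip Port VLAN Table is snapshotted as a flat array of 32 x 16 = 512 16-bit entries, walked in device-major order. The sketch below is illustrative only; it reuses the limits from the chip.h hunk to show how an (external device, port) pair maps onto the snapshot buffer.

/* Illustrative only: size and index math for the devlink "pvt" snapshot. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PVT_SWITCHES 32
#define MAX_PVT_PORTS    16
#define MAX_PVT_ENTRIES  (MAX_PVT_SWITCHES * MAX_PVT_PORTS)

/* The snapshot loop iterates dev 0..31, then port 0..15, so the entry for
 * (dev, port) lands at this offset in the u16 buffer.
 */
static unsigned int pvt_index(unsigned int dev, unsigned int port)
{
	return dev * MAX_PVT_PORTS + port;
}

int main(void)
{
	printf("region size: %zu bytes\n",
	       (size_t)MAX_PVT_ENTRIES * sizeof(uint16_t));	/* 1024 */
	printf("entry for dev 3, port 5: %u\n", pvt_index(3, 5));	/* 53 */
	return 0;
}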
#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff -#define MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK 0x1f +#define MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK 0x1f /* Offset 0x0C: Cross-chip Port VLAN Data Register */ #define MV88E6XXX_G2_PVT_DATA 0x0c @@ -322,6 +330,8 @@ int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); +int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev, + int src_port, u16 *data); int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, int src_port, u16 data); int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c index 7c2c67405322..eda710062933 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c +++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c @@ -42,7 +42,7 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg, } /** - * mv88e6xxx_g2_scratch_gpio_get_bit - get a bit + * mv88e6xxx_g2_scratch_get_bit - get a bit * @chip: chip private data * @base_reg: base of scratch bits * @offset: index of bit within the register @@ -67,7 +67,7 @@ static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip, } /** - * mv88e6xxx_g2_scratch_gpio_set_bit - set (or clear) a bit + * mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit * @chip: chip private data * @base_reg: base of scratch bits * @offset: index of bit within the register @@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = { }; /** - * mv88e6xxx_g2_gpio_set_smi - set gpio muxing for external smi + * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi * @chip: chip private data * @external: set mux for external smi, or free for gpio usage * diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 094d17a1d037..8f74ffc7a279 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -468,30 +468,38 @@ long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) return restart ? 
1 : -1; } -bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *clone, unsigned int type) +void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb) { struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; struct ptp_header *hdr; + struct sk_buff *clone; + unsigned int type; - if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP)) - return false; + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return; - hdr = mv88e6xxx_should_tstamp(chip, port, clone, type); + hdr = mv88e6xxx_should_tstamp(chip, port, skb, type); if (!hdr) - return false; + return; + + clone = skb_clone_sk(skb); + if (!clone) + return; if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, - &ps->state)) - return false; + &ps->state)) { + kfree_skb(clone); + return; + } ps->tx_skb = clone; ps->tx_tstamp_start = jiffies; ps->tx_seq_id = be16_to_cpu(hdr->sequence_id); ptp_schedule_worker(chip->ptp_clock, 0); - return true; } int mv88e6165_global_disable(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h index 9da9f197ba02..cf7fb6d660b1 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -117,8 +117,8 @@ int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port, bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *clone, unsigned int type); -bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *clone, unsigned int type); +void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb); int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info); @@ -151,11 +151,9 @@ static inline bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, return false; } -static inline bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *clone, - unsigned int type) +static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb) { - return false; } static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 4561f289ab76..f77e2ee64a60 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -14,6 +14,7 @@ #include <linux/phylink.h> #include "chip.h" +#include "global2.h" #include "port.h" #include "serdes.h" @@ -25,6 +26,14 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, return mv88e6xxx_read(chip, addr, reg, val); } +int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg, + int bit, int val) +{ + int addr = chip->info->port_base_addr + port; + + return mv88e6xxx_wait_bit(chip, addr, reg, bit, val); +} + int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val) { @@ -426,11 +435,111 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) return PHY_INTERFACE_MODE_NA; } +/* Support 10, 100, 200, 1000, 2500, 5000, 10000 Mbps (e.g. 88E6393X) + * Function mv88e6xxx_port_set_speed_duplex() can't be used as the register + * values for speeds 2500 & 5000 conflict. + */ +int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, + int speed, int duplex) +{ + u16 reg, ctrl; + int err; + + if (speed == SPEED_MAX) + speed = (port > 0 && port < 9) ? 
1000 : 10000; + + if (speed == 200 && port != 0) + return -EOPNOTSUPP; + + if (speed >= 2500 && port > 0 && port < 9) + return -EOPNOTSUPP; + + switch (speed) { + case 10: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10; + break; + case 100: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100; + break; + case 200: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + break; + case 1000: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000; + break; + case 2500: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + break; + case 5000: + ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + break; + case 10000: + case SPEED_UNFORCED: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED; + break; + default: + return -EOPNOTSUPP; + } + + switch (duplex) { + case DUPLEX_HALF: + ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX; + break; + case DUPLEX_FULL: + ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX | + MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL; + break; + case DUPLEX_UNFORCED: + /* normal duplex detection */ + break; + default: + return -EOPNOTSUPP; + } + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg); + if (err) + return err; + + reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK | + MV88E6390_PORT_MAC_CTL_ALTSPEED | + MV88E6390_PORT_MAC_CTL_FORCE_SPEED); + + if (speed != SPEED_UNFORCED) + reg |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED; + + reg |= ctrl; + + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); + if (err) + return err; + + if (speed) + dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed); + else + dev_dbg(chip->dev, "p%d: Speed unforced\n", port); + dev_dbg(chip->dev, "p%d: %s %s duplex\n", port, + reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce", + reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ?
"full" : "half"); + + return 0; +} + +phy_interface_t mv88e6393x_port_max_speed_mode(int port) +{ + if (port == 0 || port == 9 || port == 10) + return PHY_INTERFACE_MODE_10GBASER; + + return PHY_INTERFACE_MODE_NA; +} + static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode, bool force) { - u8 lane; u16 cmode; + int lane; u16 reg; int err; @@ -450,6 +559,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, case PHY_INTERFACE_MODE_2500BASEX: cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; break; + case PHY_INTERFACE_MODE_5GBASER: + cmode = MV88E6393X_PORT_STS_CMODE_5GBASER; + break; case PHY_INTERFACE_MODE_XGMII: case PHY_INTERFACE_MODE_XAUI: cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; @@ -457,6 +569,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, case PHY_INTERFACE_MODE_RXAUI: cmode = MV88E6XXX_PORT_STS_CMODE_RXAUI; break; + case PHY_INTERFACE_MODE_10GBASER: + cmode = MV88E6393X_PORT_STS_CMODE_10GBASER; + break; default: cmode = 0; } @@ -466,7 +581,7 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) { + if (lane >= 0) { if (chip->ports[port].serdes_irq) { err = mv88e6xxx_serdes_irq_disable(chip, port, lane); if (err) @@ -495,8 +610,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, chip->ports[port].cmode = cmode; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (!lane) - return -ENODEV; + if (lane < 0) + return lane; err = mv88e6xxx_serdes_power_up(chip, port, lane); if (err) @@ -541,6 +656,29 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_set_cmode(chip, port, mode, false); } +int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + int err; + u16 reg; + + if (port != 0 && port != 9 && port != 10) + return -EOPNOTSUPP; + + /* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */ + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, ®); + if (err) + return err; + + reg &= ~MV88E6XXX_PORT_MAC_CTL_EEE; + reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_EEE; + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); + if (err) + return err; + + return mv88e6xxx_port_set_cmode(chip, port, mode, false); +} + static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, int port) { @@ -1171,6 +1309,27 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port) 0x0001); } +/* Offset 0x0B: Port Association Vector */ + +int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port, + u16 pav) +{ + u16 reg, mask; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, + ®); + if (err) + return err; + + mask = mv88e6xxx_port_mask(chip); + reg &= ~mask; + reg |= pav & mask; + + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, + reg); +} + /* Offset 0x0C: Port ATU Control */ int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port) @@ -1185,6 +1344,156 @@ int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port) return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_PRI_OVERRIDE, 0); } +/* Offset 0x0E: Policy & MGMT Control Register for FAMILY 6191X 6193X 6393X */ + +static int mv88e6393x_port_policy_read(struct mv88e6xxx_chip *chip, int port, + u16 pointer, u8 *data) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_write(chip, port, 
MV88E6393X_PORT_POLICY_MGMT_CTL, + pointer); + if (err) + return err; + + err = mv88e6xxx_port_read(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL, + &reg); + if (err) + return err; + + *data = reg; + + return 0; +} + +static int mv88e6393x_port_policy_write(struct mv88e6xxx_chip *chip, int port, + u16 pointer, u8 data) +{ + u16 reg; + + reg = MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE | pointer | data; + + return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL, + reg); +} + +static int mv88e6393x_port_policy_write_all(struct mv88e6xxx_chip *chip, + u16 pointer, u8 data) +{ + int err, port; + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + if (dsa_is_unused_port(chip->ds, port)) + continue; + + err = mv88e6393x_port_policy_write(chip, port, pointer, data); + if (err) + return err; + } + + return 0; +} + +int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) +{ + u16 ptr; + int err; + + switch (direction) { + case MV88E6XXX_EGRESS_DIR_INGRESS: + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST; + err = mv88e6393x_port_policy_write_all(chip, ptr, port); + if (err) + return err; + break; + case MV88E6XXX_EGRESS_DIR_EGRESS: + ptr = MV88E6393X_G2_EGRESS_MONITOR_DEST; + err = mv88e6xxx_g2_write(chip, ptr, port); + if (err) + return err; + break; + } + + return 0; +} + +int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port) +{ + u16 ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST; + u8 data = MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI | + upstream_port; + + return mv88e6393x_port_policy_write(chip, port, ptr, data); +} + +int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + u16 ptr; + int err; + + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:00 and + * 01:80:c2:00:00:02 as MGMT.
+ */ + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + return 0; +} + +/* Offset 0x10 & 0x11: EPC */ + +static int mv88e6393x_port_epc_wait_ready(struct mv88e6xxx_chip *chip, int port) +{ + int bit = __bf_shf(MV88E6393X_PORT_EPC_CMD_BUSY); + + return mv88e6xxx_port_wait_bit(chip, port, MV88E6393X_PORT_EPC_CMD, bit, 0); +} + +/* Port Ether type for 6393X family */ + +int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, + u16 etype) +{ + u16 val; + int err; + + err = mv88e6393x_port_epc_wait_ready(chip, port); + if (err) + return err; + + err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_DATA, etype); + if (err) + return err; + + val = MV88E6393X_PORT_EPC_CMD_BUSY | + MV88E6393X_PORT_EPC_CMD_WRITE | + MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE; + + return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_CMD, val); +} + /* Offset 0x0f: Port Ether type */ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, @@ -1259,46 +1568,43 @@ int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port) /* Offset 0x0E: Policy Control Register */ -int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, - enum mv88e6xxx_policy_mapping mapping, - enum mv88e6xxx_policy_action action) +static int +mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action, + u16 *mask, u16 *val, int *shift) { - u16 reg, mask, val; - int shift; - int err; - switch (mapping) { case MV88E6XXX_POLICY_MAPPING_DA: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK; break; case MV88E6XXX_POLICY_MAPPING_SA: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK; break; case MV88E6XXX_POLICY_MAPPING_VTU: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK; break; case MV88E6XXX_POLICY_MAPPING_ETYPE: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK; break; case MV88E6XXX_POLICY_MAPPING_PPPOE: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK; break; case MV88E6XXX_POLICY_MAPPING_VBAS: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK; break; case MV88E6XXX_POLICY_MAPPING_OPT82: - 
shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK; break; case MV88E6XXX_POLICY_MAPPING_UDP: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK; break; default: return -EOPNOTSUPP; @@ -1306,21 +1612,37 @@ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, switch (action) { case MV88E6XXX_POLICY_ACTION_NORMAL: - val = MV88E6XXX_PORT_POLICY_CTL_NORMAL; + *val = MV88E6XXX_PORT_POLICY_CTL_NORMAL; break; case MV88E6XXX_POLICY_ACTION_MIRROR: - val = MV88E6XXX_PORT_POLICY_CTL_MIRROR; + *val = MV88E6XXX_PORT_POLICY_CTL_MIRROR; break; case MV88E6XXX_POLICY_ACTION_TRAP: - val = MV88E6XXX_PORT_POLICY_CTL_TRAP; + *val = MV88E6XXX_PORT_POLICY_CTL_TRAP; break; case MV88E6XXX_POLICY_ACTION_DISCARD: - val = MV88E6XXX_PORT_POLICY_CTL_DISCARD; + *val = MV88E6XXX_PORT_POLICY_CTL_DISCARD; break; default: return -EOPNOTSUPP; } + return 0; +} + +int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 reg, mask, val; + int shift; + int err; + + err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask, + &val, &shift); + if (err) + return err; + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_POLICY_CTL, &reg); if (err) return err; @@ -1330,3 +1652,37 @@ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_POLICY_CTL, reg); } + +int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 mask, val; + int shift; + int err; + u16 ptr; + u8 reg; + + err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask, + &val, &shift); + if (err) + return err; + + /* On the 6393x, the 16-bit Port Policy CTL register from older chips + * is changed to Port Policy MGMT CTL, which can access more data, but + * only indirectly. The original 16-bit value is divided into two 8-bit + * registers.
+ */ + ptr = shift / 8; + shift %= 8; + mask >>= ptr * 8; + + err = mv88e6393x_port_policy_read(chip, port, ptr, &reg); + if (err) + return err; + + reg &= ~mask; + reg |= (val << shift) & mask; + + return mv88e6393x_port_policy_write(chip, port, ptr, reg); +} diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index e6d0eaa6aa1d..b10e5aebacf6 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -49,6 +49,9 @@ #define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b #define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c #define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d +#define MV88E6393X_PORT_STS_CMODE_5GBASER 0x000c +#define MV88E6393X_PORT_STS_CMODE_10GBASER 0x000d +#define MV88E6393X_PORT_STS_CMODE_USXGMII 0x000e #define MV88E6185_PORT_STS_CDUPLEX 0x0008 #define MV88E6185_PORT_STS_CMODE_MASK 0x0007 #define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000 @@ -68,6 +71,8 @@ #define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000 #define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000 #define MV88E6352_PORT_MAC_CTL_200BASE 0x1000 +#define MV88E6XXX_PORT_MAC_CTL_EEE 0x0200 +#define MV88E6XXX_PORT_MAC_CTL_FORCE_EEE 0x0100 #define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400 #define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200 #define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100 @@ -117,6 +122,8 @@ #define MV88E6XXX_PORT_SWITCH_ID_PROD_6176 0x1760 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191X 0x1920 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6193X 0x1930 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400 @@ -129,6 +136,7 @@ #define MV88E6XXX_PORT_SWITCH_ID_PROD_6350 0x3710 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6351 0x3750 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6390 0x3900 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6393X 0x3930 #define MV88E6XXX_PORT_SWITCH_ID_REV_MASK 0x000f /* Offset 0x04: Port Control Register */ @@ -236,6 +244,19 @@ #define MV88E6XXX_PORT_POLICY_CTL_TRAP 0x0002 #define MV88E6XXX_PORT_POLICY_CTL_DISCARD 0x0003 +/* Offset 0x0E: Policy & MGMT Control Register (FAMILY_6393X) */ +#define MV88E6393X_PORT_POLICY_MGMT_CTL 0x0e +#define MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE 0x8000 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_MASK 0x3f00 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_DATA_MASK 0x00ff +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO 0x2000 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI 0x2100 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO 0x2400 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI 0x2500 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST 0x3000 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST 0x3800 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI 0x00e0 + /* Offset 0x0F: Port Special Ether Type */ #define MV88E6XXX_PORT_ETH_TYPE 0x0f #define MV88E6XXX_PORT_ETH_TYPE_DEFAULT 0x9100 @@ -243,6 +264,15 @@ /* Offset 0x10: InDiscards Low Counter */ #define MV88E6XXX_PORT_IN_DISCARD_LO 0x10 +/* Offset 0x10: Extended Port Control Command */ +#define MV88E6393X_PORT_EPC_CMD 0x10 +#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000 +#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300 +#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02 + +/* Offset 0x11: Extended Port Control Data */ +#define MV88E6393X_PORT_EPC_DATA 0x11 + /* Offset 0x11: InDiscards High Counter */ #define MV88E6XXX_PORT_IN_DISCARD_HI 0x11 @@ -288,6 +318,8 @@ int
mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val); int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val); +int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg, + int bit, int val); int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port, int pause); @@ -315,10 +347,13 @@ int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, int speed, int duplex); int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, int speed, int duplex); +int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, + int speed, int duplex); phy_interface_t mv88e6341_port_max_speed_mode(int port); phy_interface_t mv88e6390_port_max_speed_mode(int port); phy_interface_t mv88e6390x_port_max_speed_mode(int port); +phy_interface_t mv88e6393x_port_max_speed_mode(int port); int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state); @@ -351,8 +386,19 @@ int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port, int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_policy_mapping mapping, enum mv88e6xxx_policy_action action); +int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action); int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, u16 etype); +int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port); +int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port); +int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, + u16 etype); int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port, bool message_port); int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port, @@ -361,6 +407,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port, size_t size); int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); +int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port, + u16 pav); int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, @@ -371,6 +419,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); +int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 3195936dc5be..e4fbef81bc52 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -95,7 +95,7 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, return 0; } -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u16 val, new_val; @@ -117,7 +117,7 @@ int 
mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, } int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise) { @@ -166,7 +166,7 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, } int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state) + int lane, struct phylink_link_state *state) { u16 lpa, status; int err; @@ -187,7 +187,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u16 bmcr; int err; @@ -200,7 +200,7 @@ int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, } int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex) + int lane, int speed, int duplex) { u16 val, bmcr; int err; @@ -230,10 +230,10 @@ int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, return mv88e6352_serdes_write(chip, MII_BMCR, bmcr); } -u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; - u8 lane = 0; + int lane = -ENODEV; if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) || (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) || @@ -245,7 +245,7 @@ u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6xxx_serdes_get_lane(chip, port)) + if (mv88e6xxx_serdes_get_lane(chip, port) >= 0) return true; return false; @@ -354,7 +354,7 @@ static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) } irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { irqreturn_t ret = IRQ_NONE; u16 status; @@ -372,7 +372,7 @@ irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, return ret; } -int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable) { u16 val = 0; @@ -413,10 +413,10 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) } } -u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; - u8 lane = 0; + int lane = -ENODEV; switch (port) { case 5: @@ -430,7 +430,7 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } -int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { /* The serdes power can't be controlled on this switch chip but we need @@ -440,23 +440,23 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, return 0; } -u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { /* There are no configurable serdes lanes on this switch chip but we - * need to return non-zero so that callers of + * need to return a non-negative lane number so that callers of * mv88e6xxx_serdes_get_lane() know this is a serdes port. 
*/ switch (chip->ports[port].cmode) { case MV88E6185_PORT_STS_CMODE_SERDES: case MV88E6185_PORT_STS_CMODE_1000BASE_X: - return 0xff; - default: return 0; + default: + return -ENODEV; } } int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state) + int lane, struct phylink_link_state *state) { int err; u16 status; @@ -492,7 +492,7 @@ int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return 0; } -int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable) { u8 cmode = chip->ports[port].cmode; @@ -525,7 +525,7 @@ static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) } irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u8 cmode = chip->ports[port].cmode; @@ -539,10 +539,10 @@ irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, return IRQ_NONE; } -u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; - u8 lane = 0; + int lane = -ENODEV; switch (port) { case 9: @@ -562,12 +562,12 @@ u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } -u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode_port = chip->ports[port].cmode; u8 cmode_port10 = chip->ports[10].cmode; u8 cmode_port9 = chip->ports[9].cmode; - u8 lane = 0; + int lane = -ENODEV; switch (port) { case 2: @@ -637,8 +637,29 @@ u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } +/* Only Ports 0, 9 and 10 have SERDES lanes. Return the SERDES lane address + * a port is using else Returns -ENODEV. 
+ */ +int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +{ + u8 cmode = chip->ports[port].cmode; + int lane = -ENODEV; + + if (port != 0 && port != 9 && port != 10) + return -EOPNOTSUPP; + + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode == MV88E6393X_PORT_STS_CMODE_5GBASER || + cmode == MV88E6393X_PORT_STS_CMODE_10GBASER) + lane = port; + + return lane; +} + /* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane, +static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, bool up) { u16 val, new_val; @@ -665,7 +686,7 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane, } /* Set power up/down for SGMII and 1000Base-X */ -static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane, +static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, bool up) { u16 val, new_val; @@ -701,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = { int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6390_serdes_get_lane(chip, port) == 0) + if (mv88e6390_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_hw_stats); @@ -713,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, struct mv88e6390_serdes_hw_stat *stat; int i; - if (mv88e6390_serdes_get_lane(chip, port) == 0) + if (mv88e6390_serdes_get_lane(chip, port) < 0) return 0; for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -750,7 +771,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, int i; lane = mv88e6390_serdes_get_lane(chip, port); - if (lane == 0) + if (lane < 0) return 0; for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -761,7 +782,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6390_serdes_hw_stats); } -static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane) +static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane) { u16 reg; int err; @@ -776,7 +797,7 @@ static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane) MV88E6390_PG_CONTROL, reg); } -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u8 cmode = chip->ports[port].cmode; @@ -801,7 +822,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, } int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise) { @@ -860,7 +881,7 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, } static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, - int port, u8 lane, struct phylink_link_state *state) + int port, int lane, struct phylink_link_state *state) { u16 lpa, status; int err; @@ -883,7 +904,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, - int port, u8 lane, struct phylink_link_state *state) + int port, int lane, struct phylink_link_state *state) { u16 status; int err; @@ -902,8 +923,32 @@ static int 
mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, return 0; } +static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, + int port, int lane, + struct phylink_link_state *state) +{ + u16 status; + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_10G_STAT1, &status); + if (err) + return err; + + state->link = !!(status & MDIO_STAT1_LSTATUS); + if (state->link) { + if (state->interface == PHY_INTERFACE_MODE_5GBASER) + state->speed = SPEED_5000; + else + state->speed = SPEED_10000; + state->duplex = DUPLEX_FULL; + } + + return 0; +} + int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state) + int lane, struct phylink_link_state *state) { switch (state->interface) { case PHY_INTERFACE_MODE_SGMII: @@ -921,8 +966,27 @@ int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, } } +int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + int lane, struct phylink_link_state *state) +{ + switch (state->interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane, + state); + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane, + state); + + default: + return -EOPNOTSUPP; + } +} + int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u16 bmcr; int err; @@ -938,7 +1002,7 @@ int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, } int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex) + int lane, int speed, int duplex) { u16 val, bmcr; int err; @@ -972,7 +1036,7 @@ int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, } static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { u16 bmsr; int err; @@ -988,8 +1052,25 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS)); } +static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + u16 status; + int err; + + /* If the link has dropped, we want to know about it. 
*/ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_10G_STAT1, &status); + if (err) { + dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err); + return; + } + + dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS)); +} + static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, - u8 lane, bool enable) + int lane, bool enable) { u16 val = 0; @@ -1001,7 +1082,7 @@ static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, MV88E6390_SGMII_INT_ENABLE, val); } -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable) { u8 cmode = chip->ports[port].cmode; @@ -1017,7 +1098,7 @@ int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, } static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, - u8 lane, u16 *status) + int lane, u16 *status) { int err; @@ -1027,8 +1108,85 @@ static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, return err; } +static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip, + u8 lane, bool enable) +{ + u16 val = 0; + + if (enable) + val |= MV88E6393X_10G_INT_LINK_CHANGE; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_10G_INT_ENABLE, val); +} + +int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane, bool enable) +{ + u8 cmode = chip->ports[port].cmode; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable); + case MV88E6393X_PORT_STS_CMODE_5GBASER: + case MV88E6393X_PORT_STS_CMODE_10GBASER: + return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable); + } + + return 0; +} + +static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip, + u8 lane, u16 *status) +{ + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_10G_INT_STATUS, status); + + return err; +} + +irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + int lane) +{ + u8 cmode = chip->ports[port].cmode; + irqreturn_t ret = IRQ_NONE; + u16 status; + int err; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); + if (err) + return ret; + if (status & (MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP)) { + ret = IRQ_HANDLED; + mv88e6390_serdes_irq_link_sgmii(chip, port, lane); + } + break; + case MV88E6393X_PORT_STS_CMODE_5GBASER: + case MV88E6393X_PORT_STS_CMODE_10GBASER: + err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status); + if (err) + return err; + if (status & MV88E6393X_10G_INT_LINK_CHANGE) { + ret = IRQ_HANDLED; + mv88e6393x_serdes_irq_link_10g(chip, port, lane); + } + break; + } + + return ret; +} + irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u8 cmode = chip->ports[port].cmode; irqreturn_t ret = IRQ_NONE; @@ -1087,7 +1245,7 @@ static const u16 mv88e6390_serdes_regs[] = { int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6xxx_serdes_get_lane(chip, port) == 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16); @@ -1102,7 +1260,7 @@ void 
mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) int i; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane == 0) + if (lane < 0) return; for (i = 0 ; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) { @@ -1112,3 +1270,101 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) p[i] = reg; } } + +static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg, pcs; + int err; + + /* mv88e6393x family errata 4.6: + * Cannot clear PwrDn bit on SERDES on port 0 if device is configured + * CPU_MGD mode or P0_mode is configured for [x]MII. + * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1. + * + * It seems that after this workaround the SERDES is automatically + * powered up (the bit is cleared), so power it down. + */ + if (lane == MV88E6393X_PORT0_LANE) { + err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE, + MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &reg); + if (err) + return err; + + reg &= ~MV88E6393X_SERDES_POC_PDOWN; + reg |= MV88E6393X_SERDES_POC_RESET; + + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); + if (err) + return err; + + err = mv88e6390_serdes_power_sgmii(chip, lane, false); + if (err) + return err; + } + + /* mv88e6393x family errata 4.8: + * When a SERDES port is operating in 1000BASE-X or SGMII mode link may + * not come up after hardware reset or software reset of SERDES core. + * Workaround is to write SERDES register 4.F074.14=1 for only those + * modes and 0 in all other modes. + */ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &pcs); + if (err) + return err; + + pcs &= MV88E6393X_SERDES_POC_PCS_MASK; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_ERRATA_4_8_REG, &reg); + if (err) + return err; + + if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX || + pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY || + pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC) + reg |= MV88E6393X_ERRATA_4_8_BIT; + else + reg &= ~MV88E6393X_ERRATA_4_8_BIT; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_ERRATA_4_8_REG, reg); +} + +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE); + if (err) + return err; + + err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE); + if (err) + return err; + + return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE); +} + +int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, + bool on) +{ + u8 cmode = chip->ports[port].cmode; + + if (port != 0 && port != 9 && port != 10) + return -EOPNOTSUPP; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + return mv88e6390_serdes_power_sgmii(chip, lane, on); + case MV88E6393X_PORT_STS_CMODE_5GBASER: + case MV88E6393X_PORT_STS_CMODE_10GBASER: + return mv88e6390_serdes_power_10g(chip, lane, on); + } + + return 0; +} diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 93822ef9bab8..cbb3ba30caea 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -42,6 +42,9 @@ /* 10GBASE-R and 10GBASE-X4/X2 */ #define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1) #define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1) +#define MV88E6393X_10G_INT_ENABLE 0x9000 +#define MV88E6393X_10G_INT_LINK_CHANGE BIT(2) +#define
MV88E6393X_10G_INT_STATUS 0x9001 /* 1000BASE-X and SGMII */ #define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR) @@ -73,55 +76,86 @@ #define MV88E6390_PG_CONTROL 0xf010 #define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0) -u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +#define MV88E6393X_PORT0_LANE 0x00 +#define MV88E6393X_PORT9_LANE 0x09 +#define MV88E6393X_PORT10_LANE 0x0a + +/* Port Operational Configuration */ +#define MV88E6393X_SERDES_POC 0xf002 +#define MV88E6393X_SERDES_POC_PCS_1000BASEX 0x0000 +#define MV88E6393X_SERDES_POC_PCS_2500BASEX 0x0001 +#define MV88E6393X_SERDES_POC_PCS_SGMII_PHY 0x0002 +#define MV88E6393X_SERDES_POC_PCS_SGMII_MAC 0x0003 +#define MV88E6393X_SERDES_POC_PCS_5GBASER 0x0004 +#define MV88E6393X_SERDES_POC_PCS_10GBASER 0x0005 +#define MV88E6393X_SERDES_POC_PCS_USXGMII_PHY 0x0006 +#define MV88E6393X_SERDES_POC_PCS_USXGMII_MAC 0x0007 +#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007 +#define MV88E6393X_SERDES_POC_RESET BIT(15) +#define MV88E6393X_SERDES_POC_PDOWN BIT(5) + +#define MV88E6393X_ERRATA_4_8_REG 0xF074 +#define MV88E6393X_ERRATA_4_8_BIT BIT(14) + +int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise); int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise); int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); +int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + int lane, struct phylink_link_state *state); int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex); + int lane, int speed, int duplex); int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex); + int lane, int speed, int duplex); unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); -int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, 
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up); -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on); -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on); -int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, + bool on); +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip); +int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); -int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); +int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane, bool enable); irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); +irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + int lane); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); @@ -138,18 +172,18 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p); int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port); void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p); -/* Return the (first) SERDES lane address a port is using, 0 otherwise. */ -static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip, - int port) +/* Return the (first) SERDES lane address a port is using, -errno otherwise. 
*/ +static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip, + int port) { if (!chip->info->ops->serdes_get_lane) - return 0; + return -EOPNOTSUPP; return chip->info->ops->serdes_get_lane(chip, port); } static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_power) return -EOPNOTSUPP; @@ -158,7 +192,7 @@ static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip, } static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_power) return -EOPNOTSUPP; @@ -176,7 +210,7 @@ mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) } static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_irq_enable) return -EOPNOTSUPP; @@ -185,7 +219,7 @@ static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip, } static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_irq_enable) return -EOPNOTSUPP; @@ -194,7 +228,7 @@ static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip, } static inline irqreturn_t -mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, u8 lane) +mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane) { if (!chip->info->ops->serdes_irq_status) return IRQ_NONE; diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 628afb47b579..ce607fbaaa3a 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -719,7 +719,9 @@ static int felix_bridge_join(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; - return ocelot_port_bridge_join(ocelot, port, br); + ocelot_port_bridge_join(ocelot, port, br); + + return 0; } static void felix_bridge_leave(struct dsa_switch *ds, int port, @@ -1393,19 +1395,20 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port, return false; } -static bool felix_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *clone, unsigned int type) +static void felix_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb) { struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct sk_buff *clone = NULL; - if (ocelot->ptp && (skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP) && - ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { - ocelot_port_add_txtstamp_skb(ocelot, port, clone); - return true; - } + if (!ocelot->ptp) + return; - return false; + if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) + return; + + if (clone) + OCELOT_SKB_CB(skb)->clone = clone; } static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 5ff623ee76a6..2473bebe48e6 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1057,10 +1057,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) res.end += felix->imdio_base; imdio_regs = devm_ioremap_resource(dev, &res); - if (IS_ERR(imdio_regs)) { - dev_err(dev, "failed to map internal MDIO registers\n"); + if (IS_ERR(imdio_regs)) return PTR_ERR(imdio_regs); - } hw = enetc_hw_alloc(dev, imdio_regs); if (IS_ERR(hw)) { @@ -1229,8 +1227,12 @@ static int vsc9959_qos_port_tas_set(struct ocelot 
*ocelot, int port, if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) return -ERANGE; - ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) | - QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q, + /* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set + * guard band to be implemented for nonschedule queues to schedule + * queues transition. + */ + ocelot_rmw(ocelot, + QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port), QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M | QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q, QSYS_TAS_PARAM_CFG_CTRL); diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 12e76020bea3..973761132fc3 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -317,11 +317,16 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port, if (rc) return rc; - rc = -EOPNOTSUPP; - flow_action_for_each(i, act, &rule->action) { switch (act->id) { case FLOW_ACTION_POLICE: + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + rc = -EOPNOTSUPP; + goto out; + } + rc = sja1105_flower_policer(priv, port, extack, cookie, &key, act->police.rate_bytes_ps, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 51ea104c63bb..405024b637d6 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3049,21 +3049,6 @@ static void sja1105_teardown(struct dsa_switch *ds) } } -static int sja1105_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - struct net_device *slave; - - if (!dsa_is_user_port(ds, port)) - return 0; - - slave = dsa_to_port(ds, port)->slave; - - slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; - - return 0; -} - static void sja1105_port_disable(struct dsa_switch *ds, int port) { struct sja1105_private *priv = ds->priv; @@ -3152,7 +3137,7 @@ static void sja1105_port_deferred_xmit(struct kthread_work *work) struct sk_buff *skb; while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) { - struct sk_buff *clone = DSA_SKB_CB(skb)->clone; + struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; mutex_lock(&priv->mgmt_lock); @@ -3491,7 +3476,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .get_ethtool_stats = sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, - .port_enable = sja1105_port_enable, .port_disable = sja1105_port_disable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 1b90570b257b..0bc566b9e958 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -431,20 +431,24 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, return true; } -/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone - * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit +/* Called from dsa_skb_tx_timestamp. This callback is just to clone + * the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit * callback, where we will timestamp it synchronously. 
*/ -bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *skb, unsigned int type) +void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sja1105_private *priv = ds->priv; struct sja1105_port *sp = &priv->ports[port]; + struct sk_buff *clone; if (!sp->hwts_tx_en) - return false; + return; - return true; + clone = skb_clone_sk(skb); + if (!clone) + return; + + SJA1105_SKB_CB(skb)->clone = clone; } static int sja1105_ptp_reset(struct dsa_switch *ds) diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 3daa33e98e77..34f97f58a355 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -104,8 +104,8 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb, unsigned int type); -bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *skb, unsigned int type); +void sja1105_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb); int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr); diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 53e1f7e07959..96cc5fc36eb5 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1051,6 +1051,7 @@ el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd) break; case 3: cmd->base.port = PORT_BNC; + break; default: break; } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index ad04660b97b8..1cdff1dca790 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -19,6 +19,7 @@ config SUNGEM_PHY tristate source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/actions/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" source "drivers/net/ethernet/agere/Kconfig" @@ -81,6 +82,7 @@ source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" +source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -97,7 +99,8 @@ config JME config KORINA tristate "Korina (IDT RC32434) Ethernet support" - depends on MIKROTIK_RB532 + depends on MIKROTIK_RB532 || COMPILE_TEST + select MII help If you have a Mikrotik RouterBoard 500 or IDT RC32434 based system say Y. Otherwise say N. 
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1e7dc8a7762d..cb3f9084a21b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ +obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ @@ -44,6 +45,7 @@ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ +obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o diff --git a/drivers/net/ethernet/actions/Kconfig b/drivers/net/ethernet/actions/Kconfig new file mode 100644 index 000000000000..ccad6a3f4d6f --- /dev/null +++ b/drivers/net/ethernet/actions/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config NET_VENDOR_ACTIONS + bool "Actions Semi devices" + default y + depends on ARCH_ACTIONS + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about Actions Semi devices. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_ACTIONS + +config OWL_EMAC + tristate "Actions Semi Owl Ethernet MAC support" + select PHYLIB + help + This driver supports the Actions Semi Ethernet Media Access + Controller (EMAC) found on the S500 and S900 SoCs. The controller + is compliant with the IEEE 802.3 CSMA/CD standard and supports + both half-duplex and full-duplex operation modes at 10/100 Mb/s. + +endif # NET_VENDOR_ACTIONS diff --git a/drivers/net/ethernet/actions/Makefile b/drivers/net/ethernet/actions/Makefile new file mode 100644 index 000000000000..fde8001d538a --- /dev/null +++ b/drivers/net/ethernet/actions/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Actions Semi Owl SoCs built-in ethernet macs +# + +obj-$(CONFIG_OWL_EMAC) += owl-emac.o diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c new file mode 100644 index 000000000000..b8e771c2bc40 --- /dev/null +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -0,0 +1,1625 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Actions Semi Owl SoCs Ethernet MAC driver + * + * Copyright (c) 2012 Actions Semi Inc. 
+ * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#include <linux/circ_buf.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/reset.h> + +#include "owl-emac.h" + +#define OWL_EMAC_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg) +{ + return readl(priv->base + reg); +} + +static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data) +{ + writel(data, priv->base + reg); +} + +static u32 owl_emac_reg_update(struct owl_emac_priv *priv, + u32 reg, u32 mask, u32 val) +{ + u32 data, old_val; + + data = owl_emac_reg_read(priv, reg); + old_val = data & mask; + + data &= ~mask; + data |= val & mask; + + owl_emac_reg_write(priv, reg, data); + + return old_val; +} + +static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits) +{ + owl_emac_reg_update(priv, reg, bits, bits); +} + +static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits) +{ + owl_emac_reg_update(priv, reg, bits, 0); +} + +static struct device *owl_emac_get_dev(struct owl_emac_priv *priv) +{ + return priv->netdev->dev.parent; +} + +static void owl_emac_irq_enable(struct owl_emac_priv *priv) +{ + /* Enable all interrupts except TU. + * + * Note the NIE and AIE bits shall also be set in order to actually + * enable the selected interrupts. + */ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7, + OWL_EMAC_BIT_MAC_CSR7_NIE | + OWL_EMAC_BIT_MAC_CSR7_AIE | + OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE); +} + +static void owl_emac_irq_disable(struct owl_emac_priv *priv) +{ + /* Disable all interrupts. + * + * WARNING: Unset only the NIE and AIE bits in CSR7 to workaround an + * unexpected side effect (MAC hardware bug?!) where some bits in the + * status register (CSR5) are cleared automatically before being able + * to read them via owl_emac_irq_clear(). + */ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7, + OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE); +} + +static u32 owl_emac_irq_status(struct owl_emac_priv *priv) +{ + return owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR5); +} + +static u32 owl_emac_irq_clear(struct owl_emac_priv *priv) +{ + u32 val = owl_emac_irq_status(priv); + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR5, val); + + return val; +} + +static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = owl_emac_get_dev(priv); + + /* Buffer pointer for the RX DMA descriptor must be word aligned. 
*/ + return dma_map_single(dev, skb_tail_pointer(skb), + skb_tailroom(skb), DMA_FROM_DEVICE); +} + +static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + struct device *dev = owl_emac_get_dev(priv); + + dma_unmap_single(dev, dma_addr, skb_tailroom(skb), DMA_FROM_DEVICE); +} + +static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = owl_emac_get_dev(priv); + + return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); +} + +static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + struct device *dev = owl_emac_get_dev(priv); + + dma_unmap_single(dev, dma_addr, skb_headlen(skb), DMA_TO_DEVICE); +} + +static unsigned int owl_emac_ring_num_unused(struct owl_emac_ring *ring) +{ + return CIRC_SPACE(ring->head, ring->tail, ring->size); +} + +static unsigned int owl_emac_ring_get_next(struct owl_emac_ring *ring, + unsigned int cur) +{ + return (cur + 1) & (ring->size - 1); +} + +static void owl_emac_ring_push_head(struct owl_emac_ring *ring) +{ + ring->head = owl_emac_ring_get_next(ring, ring->head); +} + +static void owl_emac_ring_pop_tail(struct owl_emac_ring *ring) +{ + ring->tail = owl_emac_ring_get_next(ring, ring->tail); +} + +static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev) +{ + struct sk_buff *skb; + int offset; + + skb = netdev_alloc_skb(netdev, OWL_EMAC_RX_FRAME_MAX_LEN + + OWL_EMAC_SKB_RESERVE); + if (unlikely(!skb)) + return NULL; + + /* Ensure 4 bytes DMA alignment. */ + offset = ((uintptr_t)skb->data) & (OWL_EMAC_SKB_ALIGN - 1); + if (unlikely(offset)) + skb_reserve(skb, OWL_EMAC_SKB_ALIGN - offset); + + return skb; +} + +static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + struct device *dev = owl_emac_get_dev(priv); + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + int i; + + for (i = 0; i < ring->size; i++) { + skb = owl_emac_alloc_skb(netdev); + if (!skb) + return -ENOMEM; + + dma_addr = owl_emac_dma_map_rx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + desc = &ring->descs[i]; + desc->status = OWL_EMAC_BIT_RDES0_OWN; + desc->control = skb_tailroom(skb) & OWL_EMAC_MSK_RDES1_RBS1; + desc->buf_addr = dma_addr; + desc->reserved = 0; + + ring->skbs[i] = skb; + ring->skbs_dma[i] = dma_addr; + } + + desc->control |= OWL_EMAC_BIT_RDES1_RER; + + ring->head = 0; + ring->tail = 0; + + return 0; +} + +static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct owl_emac_ring_desc *desc; + int i; + + for (i = 0; i < ring->size; i++) { + desc = &ring->descs[i]; + + desc->status = 0; + desc->control = OWL_EMAC_BIT_TDES1_IC; + desc->buf_addr = 0; + desc->reserved = 0; + } + + desc->control |= OWL_EMAC_BIT_TDES1_TER; + + memset(ring->skbs_dma, 0, sizeof(dma_addr_t) * ring->size); + + ring->head = 0; + ring->tail = 0; +} + +static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + int i; + + for (i = 0; i < ring->size; i++) { + ring->descs[i].status = 0; + + if (!ring->skbs_dma[i]) + continue; + + owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]); + ring->skbs_dma[i] = 0; + + dev_kfree_skb(ring->skbs[i]); + ring->skbs[i] = NULL; + } +} + +static void 
owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + int i; + + for (i = 0; i < ring->size; i++) { + ring->descs[i].status = 0; + + if (!ring->skbs_dma[i]) + continue; + + owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]); + ring->skbs_dma[i] = 0; + + dev_kfree_skb(ring->skbs[i]); + ring->skbs[i] = NULL; + } +} + +static int owl_emac_ring_alloc(struct device *dev, struct owl_emac_ring *ring, + unsigned int size) +{ + ring->descs = dmam_alloc_coherent(dev, + sizeof(struct owl_emac_ring_desc) * size, + &ring->descs_dma, GFP_KERNEL); + if (!ring->descs) + return -ENOMEM; + + ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!ring->skbs) + return -ENOMEM; + + ring->skbs_dma = devm_kcalloc(dev, size, sizeof(dma_addr_t), + GFP_KERNEL); + if (!ring->skbs_dma) + return -ENOMEM; + + ring->size = size; + + return 0; +} + +static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv) +{ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR2, + OWL_EMAC_VAL_MAC_CSR2_RPD); +} + +static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv) +{ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR1, + OWL_EMAC_VAL_MAC_CSR1_TPD); +} + +static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status) +{ + return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_BIT_MAC_CSR6_ST, status); +} + +static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set_tx(priv, ~0); +} + +static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status) +{ + return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_MSK_MAC_CSR6_STSR, status); +} + +static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set(priv, ~0); +} + +static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set(priv, 0); +} + +static void owl_emac_set_hw_mac_addr(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + u8 *mac_addr = netdev->dev_addr; + u32 addr_high, addr_low; + + addr_high = mac_addr[0] << 8 | mac_addr[1]; + addr_low = mac_addr[2] << 24 | mac_addr[3] << 16 | + mac_addr[4] << 8 | mac_addr[5]; + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR17, addr_high); + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR16, addr_low); +} + +static void owl_emac_update_link_state(struct owl_emac_priv *priv) +{ + u32 val, status; + + if (priv->pause) { + val = OWL_EMAC_BIT_MAC_CSR20_FCE | OWL_EMAC_BIT_MAC_CSR20_TUE; + val |= OWL_EMAC_BIT_MAC_CSR20_TPE | OWL_EMAC_BIT_MAC_CSR20_RPE; + val |= OWL_EMAC_BIT_MAC_CSR20_BPE; + } else { + val = 0; + } + + /* Update flow control. */ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR20, val); + + val = (priv->speed == SPEED_100) ? OWL_EMAC_VAL_MAC_CSR6_SPEED_100M : + OWL_EMAC_VAL_MAC_CSR6_SPEED_10M; + val <<= OWL_EMAC_OFF_MAC_CSR6_SPEED; + + if (priv->duplex == DUPLEX_FULL) + val |= OWL_EMAC_BIT_MAC_CSR6_FD; + + spin_lock_bh(&priv->lock); + + /* Temporarily stop DMA TX & RX. */ + status = owl_emac_dma_cmd_stop(priv); + + /* Update operation modes. */ + owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_MSK_MAC_CSR6_SPEED | + OWL_EMAC_BIT_MAC_CSR6_FD, val); + + /* Restore DMA TX & RX status. 
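+ * The ST/SR bits saved by owl_emac_dma_cmd_stop() above are written back
+ * unchanged, so a previously running DMA engine is resumed and a stopped
+ * one is left stopped.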
*/ + owl_emac_dma_cmd_set(priv, status); + + spin_unlock_bh(&priv->lock); +} + +static void owl_emac_adjust_link(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + bool state_changed = false; + + if (phydev->link) { + if (!priv->link) { + priv->link = phydev->link; + state_changed = true; + } + + if (priv->speed != phydev->speed) { + priv->speed = phydev->speed; + state_changed = true; + } + + if (priv->duplex != phydev->duplex) { + priv->duplex = phydev->duplex; + state_changed = true; + } + + if (priv->pause != phydev->pause) { + priv->pause = phydev->pause; + state_changed = true; + } + } else { + if (priv->link) { + priv->link = phydev->link; + state_changed = true; + } + } + + if (state_changed) { + if (phydev->link) + owl_emac_update_link_state(priv); + + if (netif_msg_link(priv)) + phy_print_status(phydev); + } +} + +static irqreturn_t owl_emac_handle_irq(int irq, void *data) +{ + struct net_device *netdev = data; + struct owl_emac_priv *priv = netdev_priv(netdev); + + if (netif_running(netdev)) { + owl_emac_irq_disable(priv); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +static void owl_emac_ether_addr_push(u8 **dst, const u8 *src) +{ + u32 *a = (u32 *)(*dst); + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; + + *dst += 12; +} + +static void +owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb) +{ + const u8 bcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + const u8 *mac_addr = priv->netdev->dev_addr; + u8 *frame; + int i; + + skb_put(skb, OWL_EMAC_SETUP_FRAME_LEN); + + frame = skb->data; + memset(frame, 0, skb->len); + + owl_emac_ether_addr_push(&frame, mac_addr); + owl_emac_ether_addr_push(&frame, bcast_addr); + + /* Fill multicast addresses. */ + WARN_ON(priv->mcaddr_list.count >= OWL_EMAC_MAX_MULTICAST_ADDRS); + for (i = 0; i < priv->mcaddr_list.count; i++) { + mac_addr = priv->mcaddr_list.addrs[i]; + owl_emac_ether_addr_push(&frame, mac_addr); + } +} + +/* The setup frame is a special descriptor which is used to provide physical + * addresses (i.e. mac, broadcast and multicast) to the MAC hardware for + * filtering purposes. To be recognized as a setup frame, the TDES1_SET bit + * must be set in the TX descriptor control field. + */ +static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + unsigned int tx_head; + u32 status, control; + dma_addr_t dma_addr; + int ret; + + skb = owl_emac_alloc_skb(netdev); + if (!skb) + return -ENOMEM; + + owl_emac_setup_frame_prepare(priv, skb); + + dma_addr = owl_emac_dma_map_tx(priv, skb); + if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) { + ret = -ENOMEM; + goto err_free_skb; + } + + spin_lock_bh(&priv->lock); + + tx_head = ring->head; + desc = &ring->descs[tx_head]; + + status = READ_ONCE(desc->status); + control = READ_ONCE(desc->control); + dma_rmb(); /* Ensure data has been read before used. 
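+ * The status word fetched here carries the OWN flag that is tested below
+ * before the descriptor is reused for the setup frame.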
*/ + + if (unlikely(status & OWL_EMAC_BIT_TDES0_OWN) || + !owl_emac_ring_num_unused(ring)) { + spin_unlock_bh(&priv->lock); + owl_emac_dma_unmap_tx(priv, skb, dma_addr); + ret = -EBUSY; + goto err_free_skb; + } + + ring->skbs[tx_head] = skb; + ring->skbs_dma[tx_head] = dma_addr; + + control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */ + control |= OWL_EMAC_BIT_TDES1_SET; + control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len; + + WRITE_ONCE(desc->control, control); + WRITE_ONCE(desc->buf_addr, dma_addr); + dma_wmb(); /* Flush descriptor before changing ownership. */ + WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN); + + owl_emac_ring_push_head(ring); + + /* Temporarily enable DMA TX. */ + status = owl_emac_dma_cmd_start_tx(priv); + + /* Trigger setup frame processing. */ + owl_emac_dma_cmd_resume_tx(priv); + + /* Restore DMA TX status. */ + owl_emac_dma_cmd_set_tx(priv, status); + + /* Stop regular TX until setup frame is processed. */ + netif_stop_queue(netdev); + + spin_unlock_bh(&priv->lock); + + return 0; + +err_free_skb: + dev_kfree_skb(skb); + return ret; +} + +static netdev_tx_t owl_emac_ndo_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct owl_emac_ring *ring = &priv->tx_ring; + struct owl_emac_ring_desc *desc; + unsigned int tx_head; + u32 status, control; + dma_addr_t dma_addr; + + dma_addr = owl_emac_dma_map_tx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_err_ratelimited(&netdev->dev, "TX DMA mapping failed\n"); + dev_kfree_skb(skb); + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + spin_lock_bh(&priv->lock); + + tx_head = ring->head; + desc = &ring->descs[tx_head]; + + status = READ_ONCE(desc->status); + control = READ_ONCE(desc->control); + dma_rmb(); /* Ensure data has been read before used. */ + + if (!owl_emac_ring_num_unused(ring) || + unlikely(status & OWL_EMAC_BIT_TDES0_OWN)) { + netif_stop_queue(netdev); + spin_unlock_bh(&priv->lock); + + dev_dbg_ratelimited(&netdev->dev, "TX buffer full, status=0x%08x\n", + owl_emac_irq_status(priv)); + owl_emac_dma_unmap_tx(priv, skb, dma_addr); + netdev->stats.tx_dropped++; + return NETDEV_TX_BUSY; + } + + ring->skbs[tx_head] = skb; + ring->skbs_dma[tx_head] = dma_addr; + + control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */ + control |= OWL_EMAC_BIT_TDES1_FS | OWL_EMAC_BIT_TDES1_LS; + control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len; + + WRITE_ONCE(desc->control, control); + WRITE_ONCE(desc->buf_addr, dma_addr); + dma_wmb(); /* Flush descriptor before changing ownership. */ + WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN); + + owl_emac_dma_cmd_resume_tx(priv); + owl_emac_ring_push_head(ring); + + /* FIXME: The transmission is currently restricted to a single frame + * at a time as a workaround for a MAC hardware bug that causes random + * freeze of the TX queue processor. + */ + netif_stop_queue(netdev); + + spin_unlock_bh(&priv->lock); + + return NETDEV_TX_OK; +} + +static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + unsigned int tx_tail; + u32 status; + + tx_tail = ring->tail; + desc = &ring->descs[tx_tail]; + + status = READ_ONCE(desc->status); + dma_rmb(); /* Ensure data has been read before used. 
*/ + + if (status & OWL_EMAC_BIT_TDES0_OWN) + return false; + + /* Check for errors. */ + if (status & OWL_EMAC_BIT_TDES0_ES) { + dev_dbg_ratelimited(&netdev->dev, + "TX complete error status: 0x%08x\n", + status); + + netdev->stats.tx_errors++; + + if (status & OWL_EMAC_BIT_TDES0_UF) + netdev->stats.tx_fifo_errors++; + + if (status & OWL_EMAC_BIT_TDES0_EC) + netdev->stats.tx_aborted_errors++; + + if (status & OWL_EMAC_BIT_TDES0_LC) + netdev->stats.tx_window_errors++; + + if (status & OWL_EMAC_BIT_TDES0_NC) + netdev->stats.tx_heartbeat_errors++; + + if (status & OWL_EMAC_BIT_TDES0_LO) + netdev->stats.tx_carrier_errors++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += ring->skbs[tx_tail]->len; + } + + /* Some collisions occurred, but pkt has been transmitted. */ + if (status & OWL_EMAC_BIT_TDES0_DE) + netdev->stats.collisions++; + + skb = ring->skbs[tx_tail]; + owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]); + dev_kfree_skb(skb); + + ring->skbs[tx_tail] = NULL; + ring->skbs_dma[tx_tail] = 0; + + owl_emac_ring_pop_tail(ring); + + if (unlikely(netif_queue_stopped(netdev))) + netif_wake_queue(netdev); + + return true; +} + +static void owl_emac_tx_complete(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + unsigned int tx_next; + u32 status; + + spin_lock(&priv->lock); + + while (ring->tail != ring->head) { + if (!owl_emac_tx_complete_tail(priv)) + break; + } + + /* FIXME: This is a workaround for a MAC hardware bug not clearing + * (sometimes) the OWN bit for a transmitted frame descriptor. + * + * At this point, when TX queue is full, the tail descriptor has the + * OWN bit set, which normally means the frame has not been processed + * or transmitted yet. But if there is at least one descriptor in the + * queue having the OWN bit cleared, we can safely assume the tail + * frame has been also processed by the MAC hardware. + * + * If that's the case, let's force the frame completion by manually + * clearing the OWN bit. + */ + if (unlikely(!owl_emac_ring_num_unused(ring))) { + tx_next = ring->tail; + + while ((tx_next = owl_emac_ring_get_next(ring, tx_next)) != ring->head) { + status = READ_ONCE(ring->descs[tx_next].status); + dma_rmb(); /* Ensure data has been read before used. */ + + if (status & OWL_EMAC_BIT_TDES0_OWN) + continue; + + netdev_dbg(netdev, "Found uncleared TX desc OWN bit\n"); + + status = READ_ONCE(ring->descs[ring->tail].status); + dma_rmb(); /* Ensure data has been read before used. */ + status &= ~OWL_EMAC_BIT_TDES0_OWN; + WRITE_ONCE(ring->descs[ring->tail].status, status); + + owl_emac_tx_complete_tail(priv); + break; + } + } + + spin_unlock(&priv->lock); +} + +static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + struct device *dev = owl_emac_get_dev(priv); + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *curr_skb, *new_skb; + dma_addr_t curr_dma, new_dma; + unsigned int rx_tail, len; + u32 status; + int recv = 0; + + while (recv < budget) { + spin_lock(&priv->lock); + + rx_tail = ring->tail; + desc = &ring->descs[rx_tail]; + + status = READ_ONCE(desc->status); + dma_rmb(); /* Ensure data has been read before used. 
*/ + + if (status & OWL_EMAC_BIT_RDES0_OWN) { + spin_unlock(&priv->lock); + break; + } + + curr_skb = ring->skbs[rx_tail]; + curr_dma = ring->skbs_dma[rx_tail]; + owl_emac_ring_pop_tail(ring); + + spin_unlock(&priv->lock); + + if (status & (OWL_EMAC_BIT_RDES0_DE | OWL_EMAC_BIT_RDES0_RF | + OWL_EMAC_BIT_RDES0_TL | OWL_EMAC_BIT_RDES0_CS | + OWL_EMAC_BIT_RDES0_DB | OWL_EMAC_BIT_RDES0_CE | + OWL_EMAC_BIT_RDES0_ZERO)) { + dev_dbg_ratelimited(&netdev->dev, + "RX desc error status: 0x%08x\n", + status); + + if (status & OWL_EMAC_BIT_RDES0_DE) + netdev->stats.rx_over_errors++; + + if (status & (OWL_EMAC_BIT_RDES0_RF | OWL_EMAC_BIT_RDES0_DB)) + netdev->stats.rx_frame_errors++; + + if (status & OWL_EMAC_BIT_RDES0_TL) + netdev->stats.rx_length_errors++; + + if (status & OWL_EMAC_BIT_RDES0_CS) + netdev->stats.collisions++; + + if (status & OWL_EMAC_BIT_RDES0_CE) + netdev->stats.rx_crc_errors++; + + if (status & OWL_EMAC_BIT_RDES0_ZERO) + netdev->stats.rx_fifo_errors++; + + goto drop_skb; + } + + len = (status & OWL_EMAC_MSK_RDES0_FL) >> OWL_EMAC_OFF_RDES0_FL; + if (unlikely(len > OWL_EMAC_RX_FRAME_MAX_LEN)) { + netdev->stats.rx_length_errors++; + netdev_err(netdev, "invalid RX frame len: %u\n", len); + goto drop_skb; + } + + /* Prepare new skb before receiving the current one. */ + new_skb = owl_emac_alloc_skb(netdev); + if (unlikely(!new_skb)) + goto drop_skb; + + new_dma = owl_emac_dma_map_rx(priv, new_skb); + if (dma_mapping_error(dev, new_dma)) { + dev_kfree_skb(new_skb); + netdev_err(netdev, "RX DMA mapping failed\n"); + goto drop_skb; + } + + owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma); + + skb_put(curr_skb, len - ETH_FCS_LEN); + curr_skb->ip_summed = CHECKSUM_NONE; + curr_skb->protocol = eth_type_trans(curr_skb, netdev); + curr_skb->dev = netdev; + + netif_receive_skb(curr_skb); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + recv++; + goto push_skb; + +drop_skb: + netdev->stats.rx_dropped++; + netdev->stats.rx_errors++; + /* Reuse the current skb. */ + new_skb = curr_skb; + new_dma = curr_dma; + +push_skb: + spin_lock(&priv->lock); + + ring->skbs[ring->head] = new_skb; + ring->skbs_dma[ring->head] = new_dma; + + WRITE_ONCE(desc->buf_addr, new_dma); + dma_wmb(); /* Flush descriptor before changing ownership. */ + WRITE_ONCE(desc->status, OWL_EMAC_BIT_RDES0_OWN); + + owl_emac_ring_push_head(ring); + + spin_unlock(&priv->lock); + } + + return recv; +} + +static int owl_emac_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0, ru_cnt = 0, recv; + static int tx_err_cnt, rx_err_cnt; + struct owl_emac_priv *priv; + u32 status, proc_status; + + priv = container_of(napi, struct owl_emac_priv, napi); + + while ((status = owl_emac_irq_clear(priv)) & + (OWL_EMAC_BIT_MAC_CSR5_NIS | OWL_EMAC_BIT_MAC_CSR5_AIS)) { + recv = 0; + + /* TX setup frame raises ETI instead of TI. */ + if (status & (OWL_EMAC_BIT_MAC_CSR5_TI | OWL_EMAC_BIT_MAC_CSR5_ETI)) { + owl_emac_tx_complete(priv); + tx_err_cnt = 0; + + /* Count MAC internal RX errors. */ + proc_status = status & OWL_EMAC_MSK_MAC_CSR5_RS; + proc_status >>= OWL_EMAC_OFF_MAC_CSR5_RS; + if (proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_DATA || + proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_CDES || + proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_FDES) + rx_err_cnt++; + } + + if (status & OWL_EMAC_BIT_MAC_CSR5_RI) { + recv = owl_emac_rx_process(priv, budget - work_done); + rx_err_cnt = 0; + + /* Count MAC internal TX errors. 
*/ + proc_status = status & OWL_EMAC_MSK_MAC_CSR5_TS; + proc_status >>= OWL_EMAC_OFF_MAC_CSR5_TS; + if (proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_DATA || + proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_CDES) + tx_err_cnt++; + } else if (status & OWL_EMAC_BIT_MAC_CSR5_RU) { + /* MAC AHB is in suspended state, will return to RX + * descriptor processing when the host changes ownership + * of the descriptor and either an RX poll demand CMD is + * issued or a new frame is recognized by the MAC AHB. + */ + if (++ru_cnt == 2) + owl_emac_dma_cmd_resume_rx(priv); + + recv = owl_emac_rx_process(priv, budget - work_done); + + /* Guard against too many RU interrupts. */ + if (ru_cnt > 3) + break; + } + + work_done += recv; + if (work_done >= budget) + break; + } + + if (work_done < budget) { + napi_complete_done(napi, work_done); + owl_emac_irq_enable(priv); + } + + /* Reset MAC when getting too many internal TX or RX errors. */ + if (tx_err_cnt > 10 || rx_err_cnt > 10) { + netdev_dbg(priv->netdev, "%s error status: 0x%08x\n", + tx_err_cnt > 10 ? "TX" : "RX", status); + rx_err_cnt = 0; + tx_err_cnt = 0; + schedule_work(&priv->mac_reset_task); + } + + return work_done; +} + +static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv) +{ + u32 val; + + /* Enable MDC clock generation by adjusting CLKDIV according to + * the vendor implementation of the original driver. + */ + val = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10); + val &= OWL_EMAC_MSK_MAC_CSR10_CLKDIV; + val |= OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 << OWL_EMAC_OFF_MAC_CSR10_CLKDIV; + + val |= OWL_EMAC_BIT_MAC_CSR10_SB; + val |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS << OWL_EMAC_OFF_MAC_CSR10_OPCODE; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, val); +} + +static void owl_emac_core_hw_reset(struct owl_emac_priv *priv) +{ + /* Trigger hardware reset. */ + reset_control_assert(priv->reset); + usleep_range(10, 20); + reset_control_deassert(priv->reset); + usleep_range(100, 200); +} + +static int owl_emac_core_sw_reset(struct owl_emac_priv *priv) +{ + u32 val; + int ret; + + /* Trigger software reset. */ + owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR); + ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0, + val, !(val & OWL_EMAC_BIT_MAC_CSR0_SWR), + OWL_EMAC_POLL_DELAY_USEC, + OWL_EMAC_RESET_POLL_TIMEOUT_USEC); + if (ret) + return ret; + + if (priv->phy_mode == PHY_INTERFACE_MODE_RMII) { + /* Enable RMII and use the 50MHz rmii clk as output to PHY. */ + val = 0; + } else { + /* Enable SMII and use the 125MHz rmii clk as output to PHY. + * Additionally set SMII SYNC delay to 4 half cycle. + */ + val = 0x04 << OWL_EMAC_OFF_MAC_CTRL_SSDC; + val |= OWL_EMAC_BIT_MAC_CTRL_RSIS; + } + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CTRL, val); + + /* MDC is disabled after reset. */ + owl_emac_mdio_clock_enable(priv); + + /* Set FIFO pause & restart threshold levels. */ + val = 0x40 << OWL_EMAC_OFF_MAC_CSR19_FPTL; + val |= 0x10 << OWL_EMAC_OFF_MAC_CSR19_FRTL; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR19, val); + + /* Set flow control pause quanta time to ~100 ms. */ + val = 0x4FFF << OWL_EMAC_OFF_MAC_CSR18_PQT; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR18, val); + + /* Setup interrupt mitigation. */ + val = 7 << OWL_EMAC_OFF_MAC_CSR11_NRP; + val |= 4 << OWL_EMAC_OFF_MAC_CSR11_RT; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR11, val); + + /* Set RX/TX rings base addresses. 
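+ * CSR3 takes the RX descriptor list base and CSR4 the TX descriptor list
+ * base, both programmed as 32-bit DMA addresses.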
*/ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR3, + (u32)(priv->rx_ring.descs_dma)); + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR4, + (u32)(priv->tx_ring.descs_dma)); + + /* Setup initial operation mode. */ + val = OWL_EMAC_VAL_MAC_CSR6_SPEED_100M << OWL_EMAC_OFF_MAC_CSR6_SPEED; + val |= OWL_EMAC_BIT_MAC_CSR6_FD; + owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_MSK_MAC_CSR6_SPEED | + OWL_EMAC_BIT_MAC_CSR6_FD, val); + owl_emac_reg_clear(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM); + + priv->link = 0; + priv->speed = SPEED_UNKNOWN; + priv->duplex = DUPLEX_UNKNOWN; + priv->pause = 0; + priv->mcaddr_list.count = 0; + + return 0; +} + +static int owl_emac_enable(struct net_device *netdev, bool start_phy) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + int ret; + + owl_emac_dma_cmd_stop(priv); + owl_emac_irq_disable(priv); + owl_emac_irq_clear(priv); + + owl_emac_ring_prepare_tx(priv); + ret = owl_emac_ring_prepare_rx(priv); + if (ret) + goto err_unprep; + + ret = owl_emac_core_sw_reset(priv); + if (ret) { + netdev_err(netdev, "failed to soft reset MAC core: %d\n", ret); + goto err_unprep; + } + + owl_emac_set_hw_mac_addr(netdev); + owl_emac_setup_frame_xmit(priv); + + netdev_reset_queue(netdev); + napi_enable(&priv->napi); + + owl_emac_irq_enable(priv); + owl_emac_dma_cmd_start(priv); + + if (start_phy) + phy_start(netdev->phydev); + + netif_start_queue(netdev); + + return 0; + +err_unprep: + owl_emac_ring_unprepare_rx(priv); + owl_emac_ring_unprepare_tx(priv); + + return ret; +} + +static void owl_emac_disable(struct net_device *netdev, bool stop_phy) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + owl_emac_dma_cmd_stop(priv); + owl_emac_irq_disable(priv); + + netif_stop_queue(netdev); + napi_disable(&priv->napi); + + if (stop_phy) + phy_stop(netdev->phydev); + + owl_emac_ring_unprepare_rx(priv); + owl_emac_ring_unprepare_tx(priv); +} + +static int owl_emac_ndo_open(struct net_device *netdev) +{ + return owl_emac_enable(netdev, true); +} + +static int owl_emac_ndo_stop(struct net_device *netdev) +{ + owl_emac_disable(netdev, true); + + return 0; +} + +static void owl_emac_set_multicast(struct net_device *netdev, int count) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct netdev_hw_addr *ha; + int index = 0; + + if (count <= 0) { + priv->mcaddr_list.count = 0; + return; + } + + netdev_for_each_mc_addr(ha, netdev) { + if (!is_multicast_ether_addr(ha->addr)) + continue; + + WARN_ON(index >= OWL_EMAC_MAX_MULTICAST_ADDRS); + ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr); + } + + priv->mcaddr_list.count = index; + + owl_emac_setup_frame_xmit(priv); +} + +static void owl_emac_ndo_set_rx_mode(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + u32 status, val = 0; + int mcast_count = 0; + + if (netdev->flags & IFF_PROMISC) { + val = OWL_EMAC_BIT_MAC_CSR6_PR; + } else if (netdev->flags & IFF_ALLMULTI) { + val = OWL_EMAC_BIT_MAC_CSR6_PM; + } else if (netdev->flags & IFF_MULTICAST) { + mcast_count = netdev_mc_count(netdev); + + if (mcast_count > OWL_EMAC_MAX_MULTICAST_ADDRS) { + val = OWL_EMAC_BIT_MAC_CSR6_PM; + mcast_count = 0; + } + } + + spin_lock_bh(&priv->lock); + + /* Temporarily stop DMA TX & RX. */ + status = owl_emac_dma_cmd_stop(priv); + + /* Update operation modes. */ + owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM, + val); + + /* Restore DMA TX & RX status. 
*/ + owl_emac_dma_cmd_set(priv, status); + + spin_unlock_bh(&priv->lock); + + /* Set/reset multicast addr list. */ + owl_emac_set_multicast(netdev, mcast_count); +} + +static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct sockaddr *skaddr = addr; + + if (!is_valid_ether_addr(skaddr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + owl_emac_set_hw_mac_addr(netdev); + + return owl_emac_setup_frame_xmit(netdev_priv(netdev)); +} + +static int owl_emac_ndo_do_ioctl(struct net_device *netdev, + struct ifreq *req, int cmd) +{ + if (!netif_running(netdev)) + return -EINVAL; + + return phy_mii_ioctl(netdev->phydev, req, cmd); +} + +static void owl_emac_ndo_tx_timeout(struct net_device *netdev, + unsigned int txqueue) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + schedule_work(&priv->mac_reset_task); +} + +static void owl_emac_reset_task(struct work_struct *work) +{ + struct owl_emac_priv *priv; + + priv = container_of(work, struct owl_emac_priv, mac_reset_task); + + netdev_dbg(priv->netdev, "resetting MAC\n"); + owl_emac_disable(priv->netdev, false); + owl_emac_enable(priv->netdev, false); +} + +static struct net_device_stats * +owl_emac_ndo_get_stats(struct net_device *netdev) +{ + /* FIXME: If possible, try to get stats from MAC hardware registers + * instead of tracking them manually in the driver. + */ + + return &netdev->stats; +} + +static const struct net_device_ops owl_emac_netdev_ops = { + .ndo_open = owl_emac_ndo_open, + .ndo_stop = owl_emac_ndo_stop, + .ndo_start_xmit = owl_emac_ndo_start_xmit, + .ndo_set_rx_mode = owl_emac_ndo_set_rx_mode, + .ndo_set_mac_address = owl_emac_ndo_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = owl_emac_ndo_do_ioctl, + .ndo_tx_timeout = owl_emac_ndo_tx_timeout, + .ndo_get_stats = owl_emac_ndo_get_stats, +}; + +static void owl_emac_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, OWL_EMAC_DRVNAME, sizeof(info->driver)); +} + +static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + return priv->msg_enable; +} + +static void owl_emac_ethtool_set_msglevel(struct net_device *ndev, u32 val) +{ + struct owl_emac_priv *priv = netdev_priv(ndev); + + priv->msg_enable = val; +} + +static const struct ethtool_ops owl_emac_ethtool_ops = { + .get_drvinfo = owl_emac_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_msglevel = owl_emac_ethtool_get_msglevel, + .set_msglevel = owl_emac_ethtool_set_msglevel, +}; + +static int owl_emac_mdio_wait(struct owl_emac_priv *priv) +{ + u32 val; + + /* Wait while data transfer is in progress. 
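+ * The SB (start/busy) bit of CSR10 is polled until the MAC clears it or
+ * OWL_EMAC_MDIO_POLL_TIMEOUT_USEC elapses.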
*/ + return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10, + val, !(val & OWL_EMAC_BIT_MAC_CSR10_SB), + OWL_EMAC_POLL_DELAY_USEC, + OWL_EMAC_MDIO_POLL_TIMEOUT_USEC); +} + +static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct owl_emac_priv *priv = bus->priv; + u32 data, tmp; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + data = OWL_EMAC_BIT_MAC_CSR10_SB; + data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE; + + tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD; + + tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD; + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data); + + ret = owl_emac_mdio_wait(priv); + if (ret) + return ret; + + data = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10); + data &= OWL_EMAC_MSK_MAC_CSR10_DATA; + + return data; +} + +static int +owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct owl_emac_priv *priv = bus->priv; + u32 data, tmp; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + data = OWL_EMAC_BIT_MAC_CSR10_SB; + data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE; + + tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD; + + tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD; + + data |= val & OWL_EMAC_MSK_MAC_CSR10_DATA; + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data); + + return owl_emac_mdio_wait(priv); +} + +static int owl_emac_mdio_init(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct device_node *mdio_node; + int ret; + + mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + if (!mdio_node) + return -ENODEV; + + if (!of_device_is_available(mdio_node)) { + ret = -ENODEV; + goto err_put_node; + } + + priv->mii = devm_mdiobus_alloc(dev); + if (!priv->mii) { + ret = -ENOMEM; + goto err_put_node; + } + + snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + priv->mii->name = "owl-emac-mdio"; + priv->mii->parent = dev; + priv->mii->read = owl_emac_mdio_read; + priv->mii->write = owl_emac_mdio_write; + priv->mii->phy_mask = ~0; /* Mask out all PHYs from auto probing. 
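+ * The PHY is described in the device tree and is attached explicitly later
+ * via of_phy_get_and_connect() in owl_emac_phy_init().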
*/ + priv->mii->priv = priv; + + ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node); + +err_put_node: + of_node_put(mdio_node); + return ret; +} + +static int owl_emac_phy_init(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct phy_device *phy; + + phy = of_phy_get_and_connect(netdev, dev->of_node, + owl_emac_adjust_link); + if (!phy) + return -ENODEV; + + phy_set_sym_pause(phy, true, true, true); + + if (netif_msg_link(priv)) + phy_attached_info(phy); + + return 0; +} + +static void owl_emac_get_mac_addr(struct net_device *netdev) +{ + struct device *dev = netdev->dev.parent; + int ret; + + ret = eth_platform_get_mac_address(dev, netdev->dev_addr); + if (!ret && is_valid_ether_addr(netdev->dev_addr)) + return; + + eth_hw_addr_random(netdev); + dev_warn(dev, "using random MAC address %pM\n", netdev->dev_addr); +} + +static __maybe_unused int owl_emac_suspend(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct owl_emac_priv *priv = netdev_priv(netdev); + + disable_irq(netdev->irq); + + if (netif_running(netdev)) { + owl_emac_disable(netdev, true); + netif_device_detach(netdev); + } + + clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks); + + return 0; +} + +static __maybe_unused int owl_emac_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct owl_emac_priv *priv = netdev_priv(netdev); + int ret; + + ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks); + if (ret) + return ret; + + if (netif_running(netdev)) { + owl_emac_core_hw_reset(priv); + owl_emac_core_sw_reset(priv); + + ret = owl_emac_enable(netdev, true); + if (ret) { + clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks); + return ret; + } + + netif_device_attach(netdev); + } + + enable_irq(netdev->irq); + + return 0; +} + +static void owl_emac_clk_disable_unprepare(void *data) +{ + struct owl_emac_priv *priv = data; + + clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks); +} + +static int owl_emac_clk_set_rate(struct owl_emac_priv *priv) +{ + struct device *dev = owl_emac_get_dev(priv); + unsigned long rate; + int ret; + + switch (priv->phy_mode) { + case PHY_INTERFACE_MODE_RMII: + rate = 50000000; + break; + + case PHY_INTERFACE_MODE_SMII: + rate = 125000000; + break; + + default: + dev_err(dev, "unsupported phy interface mode %d\n", + priv->phy_mode); + return -EOPNOTSUPP; + } + + ret = clk_set_rate(priv->clks[OWL_EMAC_CLK_RMII].clk, rate); + if (ret) + dev_err(dev, "failed to set RMII clock rate: %d\n", ret); + + return ret; +} + +static int owl_emac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct owl_emac_priv *priv; + int ret, i; + + netdev = devm_alloc_etherdev(dev, sizeof(*priv)); + if (!netdev) + return -ENOMEM; + + platform_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, dev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->msg_enable = netif_msg_init(-1, OWL_EMAC_DEFAULT_MSG_ENABLE); + + ret = of_get_phy_mode(dev->of_node, &priv->phy_mode); + if (ret) { + dev_err(dev, "failed to get phy mode: %d\n", ret); + return ret; + } + + spin_lock_init(&priv->lock); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "unsupported DMA mask\n"); + return ret; + } + + ret = owl_emac_ring_alloc(dev, &priv->rx_ring, OWL_EMAC_RX_RING_SIZE); + if (ret) + return ret; + + ret = owl_emac_ring_alloc(dev, &priv->tx_ring, OWL_EMAC_TX_RING_SIZE); + if 
(ret) + return ret; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + netdev->irq = platform_get_irq(pdev, 0); + if (netdev->irq < 0) + return netdev->irq; + + ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq, + IRQF_SHARED, netdev->name, netdev); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", netdev->irq); + return ret; + } + + for (i = 0; i < OWL_EMAC_NCLKS; i++) + priv->clks[i].id = owl_emac_clk_names[i]; + + ret = devm_clk_bulk_get(dev, OWL_EMAC_NCLKS, priv->clks); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv); + if (ret) + return ret; + + ret = owl_emac_clk_set_rate(priv); + if (ret) + return ret; + + priv->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(priv->reset)) + return dev_err_probe(dev, PTR_ERR(priv->reset), + "failed to get reset control"); + + owl_emac_get_mac_addr(netdev); + + owl_emac_core_hw_reset(priv); + owl_emac_mdio_clock_enable(priv); + + ret = owl_emac_mdio_init(netdev); + if (ret) { + dev_err(dev, "failed to initialize MDIO bus\n"); + return ret; + } + + ret = owl_emac_phy_init(netdev); + if (ret) { + dev_err(dev, "failed to initialize PHY\n"); + return ret; + } + + INIT_WORK(&priv->mac_reset_task, owl_emac_reset_task); + + netdev->min_mtu = OWL_EMAC_MTU_MIN; + netdev->max_mtu = OWL_EMAC_MTU_MAX; + netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT; + netdev->netdev_ops = &owl_emac_netdev_ops; + netdev->ethtool_ops = &owl_emac_ethtool_ops; + netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT); + + ret = devm_register_netdev(dev, netdev); + if (ret) { + netif_napi_del(&priv->napi); + phy_disconnect(netdev->phydev); + return ret; + } + + return 0; +} + +static int owl_emac_remove(struct platform_device *pdev) +{ + struct owl_emac_priv *priv = platform_get_drvdata(pdev); + + netif_napi_del(&priv->napi); + phy_disconnect(priv->netdev->phydev); + cancel_work_sync(&priv->mac_reset_task); + + return 0; +} + +static const struct of_device_id owl_emac_of_match[] = { + { .compatible = "actions,owl-emac", }, + { } +}; +MODULE_DEVICE_TABLE(of, owl_emac_of_match); + +static SIMPLE_DEV_PM_OPS(owl_emac_pm_ops, + owl_emac_suspend, owl_emac_resume); + +static struct platform_driver owl_emac_driver = { + .driver = { + .name = OWL_EMAC_DRVNAME, + .of_match_table = owl_emac_of_match, + .pm = &owl_emac_pm_ops, + }, + .probe = owl_emac_probe, + .remove = owl_emac_remove, +}; +module_platform_driver(owl_emac_driver); + +MODULE_DESCRIPTION("Actions Semi Owl SoCs Ethernet MAC Driver"); +MODULE_AUTHOR("Actions Semi Inc."); +MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/actions/owl-emac.h b/drivers/net/ethernet/actions/owl-emac.h new file mode 100644 index 000000000000..9eb0d1a30242 --- /dev/null +++ b/drivers/net/ethernet/actions/owl-emac.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Actions Semi Owl SoCs Ethernet MAC driver + * + * Copyright (c) 2012 Actions Semi Inc. 
+ * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#ifndef __OWL_EMAC_H__ +#define __OWL_EMAC_H__ + +#define OWL_EMAC_DRVNAME "owl-emac" + +#define OWL_EMAC_POLL_DELAY_USEC 5 +#define OWL_EMAC_MDIO_POLL_TIMEOUT_USEC 1000 +#define OWL_EMAC_RESET_POLL_TIMEOUT_USEC 2000 +#define OWL_EMAC_TX_TIMEOUT (2 * HZ) + +#define OWL_EMAC_MTU_MIN ETH_MIN_MTU +#define OWL_EMAC_MTU_MAX ETH_DATA_LEN +#define OWL_EMAC_RX_FRAME_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN) +#define OWL_EMAC_SKB_ALIGN 4 +#define OWL_EMAC_SKB_RESERVE 18 + +#define OWL_EMAC_MAX_MULTICAST_ADDRS 14 +#define OWL_EMAC_SETUP_FRAME_LEN 192 + +#define OWL_EMAC_RX_RING_SIZE 64 +#define OWL_EMAC_TX_RING_SIZE 32 + +/* Bus mode register */ +#define OWL_EMAC_REG_MAC_CSR0 0x0000 +#define OWL_EMAC_BIT_MAC_CSR0_SWR BIT(0) /* Software reset */ + +/* Transmit/receive poll demand registers */ +#define OWL_EMAC_REG_MAC_CSR1 0x0008 +#define OWL_EMAC_VAL_MAC_CSR1_TPD 0x01 +#define OWL_EMAC_REG_MAC_CSR2 0x0010 +#define OWL_EMAC_VAL_MAC_CSR2_RPD 0x01 + +/* Receive/transmit descriptor list base address registers */ +#define OWL_EMAC_REG_MAC_CSR3 0x0018 +#define OWL_EMAC_REG_MAC_CSR4 0x0020 + +/* Status register */ +#define OWL_EMAC_REG_MAC_CSR5 0x0028 +#define OWL_EMAC_MSK_MAC_CSR5_TS GENMASK(22, 20) /* Transmit process state */ +#define OWL_EMAC_OFF_MAC_CSR5_TS 20 +#define OWL_EMAC_VAL_MAC_CSR5_TS_DATA 0x03 /* Transferring data HOST -> FIFO */ +#define OWL_EMAC_VAL_MAC_CSR5_TS_CDES 0x07 /* Closing transmit descriptor */ +#define OWL_EMAC_MSK_MAC_CSR5_RS GENMASK(19, 17) /* Receive process state */ +#define OWL_EMAC_OFF_MAC_CSR5_RS 17 +#define OWL_EMAC_VAL_MAC_CSR5_RS_FDES 0x01 /* Fetching receive descriptor */ +#define OWL_EMAC_VAL_MAC_CSR5_RS_CDES 0x05 /* Closing receive descriptor */ +#define OWL_EMAC_VAL_MAC_CSR5_RS_DATA 0x07 /* Transferring data FIFO -> HOST */ +#define OWL_EMAC_BIT_MAC_CSR5_NIS BIT(16) /* Normal interrupt summary */ +#define OWL_EMAC_BIT_MAC_CSR5_AIS BIT(15) /* Abnormal interrupt summary */ +#define OWL_EMAC_BIT_MAC_CSR5_ERI BIT(14) /* Early receive interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_GTE BIT(11) /* General-purpose timer expiration */ +#define OWL_EMAC_BIT_MAC_CSR5_ETI BIT(10) /* Early transmit interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_RPS BIT(8) /* Receive process stopped */ +#define OWL_EMAC_BIT_MAC_CSR5_RU BIT(7) /* Receive buffer unavailable */ +#define OWL_EMAC_BIT_MAC_CSR5_RI BIT(6) /* Receive interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_UNF BIT(5) /* Transmit underflow */ +#define OWL_EMAC_BIT_MAC_CSR5_LCIS BIT(4) /* Link change status */ +#define OWL_EMAC_BIT_MAC_CSR5_LCIQ BIT(3) /* Link change interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_TU BIT(2) /* Transmit buffer unavailable */ +#define OWL_EMAC_BIT_MAC_CSR5_TPS BIT(1) /* Transmit process stopped */ +#define OWL_EMAC_BIT_MAC_CSR5_TI BIT(0) /* Transmit interrupt */ + +/* Operation mode register */ +#define OWL_EMAC_REG_MAC_CSR6 0x0030 +#define OWL_EMAC_BIT_MAC_CSR6_RA BIT(30) /* Receive all */ +#define OWL_EMAC_BIT_MAC_CSR6_TTM BIT(22) /* Transmit threshold mode */ +#define OWL_EMAC_BIT_MAC_CSR6_SF BIT(21) /* Store and forward */ +#define OWL_EMAC_MSK_MAC_CSR6_SPEED GENMASK(17, 16) /* Eth speed selection */ +#define OWL_EMAC_OFF_MAC_CSR6_SPEED 16 +#define OWL_EMAC_VAL_MAC_CSR6_SPEED_100M 0x00 +#define OWL_EMAC_VAL_MAC_CSR6_SPEED_10M 0x02 +#define OWL_EMAC_BIT_MAC_CSR6_ST BIT(13) /* Start/stop transmit command */ +#define OWL_EMAC_BIT_MAC_CSR6_LP BIT(10) /* Loopback mode */ +#define OWL_EMAC_BIT_MAC_CSR6_FD BIT(9) /* Full duplex mode */ 
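+/* PR (promiscuous) and PM (pass all multicast) below are the filtering bits
+ * updated at runtime by owl_emac_ndo_set_rx_mode().
+ */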
+#define OWL_EMAC_BIT_MAC_CSR6_PM BIT(7) /* Pass all multicast */ +#define OWL_EMAC_BIT_MAC_CSR6_PR BIT(6) /* Promiscuous mode */ +#define OWL_EMAC_BIT_MAC_CSR6_IF BIT(4) /* Inverse filtering */ +#define OWL_EMAC_BIT_MAC_CSR6_PB BIT(3) /* Pass bad frames */ +#define OWL_EMAC_BIT_MAC_CSR6_HO BIT(2) /* Hash only filtering mode */ +#define OWL_EMAC_BIT_MAC_CSR6_SR BIT(1) /* Start/stop receive command */ +#define OWL_EMAC_BIT_MAC_CSR6_HP BIT(0) /* Hash/perfect receive filtering mode */ +#define OWL_EMAC_MSK_MAC_CSR6_STSR (OWL_EMAC_BIT_MAC_CSR6_ST | \ + OWL_EMAC_BIT_MAC_CSR6_SR) + +/* Interrupt enable register */ +#define OWL_EMAC_REG_MAC_CSR7 0x0038 +#define OWL_EMAC_BIT_MAC_CSR7_NIE BIT(16) /* Normal interrupt summary enable */ +#define OWL_EMAC_BIT_MAC_CSR7_AIE BIT(15) /* Abnormal interrupt summary enable */ +#define OWL_EMAC_BIT_MAC_CSR7_ERE BIT(14) /* Early receive interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_GTE BIT(11) /* General-purpose timer overflow */ +#define OWL_EMAC_BIT_MAC_CSR7_ETE BIT(10) /* Early transmit interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_RSE BIT(8) /* Receive stopped enable */ +#define OWL_EMAC_BIT_MAC_CSR7_RUE BIT(7) /* Receive buffer unavailable enable */ +#define OWL_EMAC_BIT_MAC_CSR7_RIE BIT(6) /* Receive interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_UNE BIT(5) /* Underflow interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_TUE BIT(2) /* Transmit buffer unavailable enable */ +#define OWL_EMAC_BIT_MAC_CSR7_TSE BIT(1) /* Transmit stopped enable */ +#define OWL_EMAC_BIT_MAC_CSR7_TIE BIT(0) /* Transmit interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE (OWL_EMAC_BIT_MAC_CSR7_ERE | \ + OWL_EMAC_BIT_MAC_CSR7_GTE | \ + OWL_EMAC_BIT_MAC_CSR7_ETE | \ + OWL_EMAC_BIT_MAC_CSR7_RSE | \ + OWL_EMAC_BIT_MAC_CSR7_RUE | \ + OWL_EMAC_BIT_MAC_CSR7_RIE | \ + OWL_EMAC_BIT_MAC_CSR7_UNE | \ + OWL_EMAC_BIT_MAC_CSR7_TSE | \ + OWL_EMAC_BIT_MAC_CSR7_TIE) + +/* Missed frames and overflow counter register */ +#define OWL_EMAC_REG_MAC_CSR8 0x0040 +/* MII management and serial ROM register */ +#define OWL_EMAC_REG_MAC_CSR9 0x0048 + +/* MII serial management register */ +#define OWL_EMAC_REG_MAC_CSR10 0x0050 +#define OWL_EMAC_BIT_MAC_CSR10_SB BIT(31) /* Start transfer or busy */ +#define OWL_EMAC_MSK_MAC_CSR10_CLKDIV GENMASK(30, 28) /* Clock divider */ +#define OWL_EMAC_OFF_MAC_CSR10_CLKDIV 28 +#define OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 0x04 +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR 0x01 /* Register write command */ +#define OWL_EMAC_OFF_MAC_CSR10_OPCODE 26 /* Operation mode */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_DCG 0x00 /* Disable clock generation */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR 0x01 /* Register write command */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD 0x02 /* Register read command */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS 0x03 /* Clock divider set */ +#define OWL_EMAC_MSK_MAC_CSR10_PHYADD GENMASK(25, 21) /* Physical layer address */ +#define OWL_EMAC_OFF_MAC_CSR10_PHYADD 21 +#define OWL_EMAC_MSK_MAC_CSR10_REGADD GENMASK(20, 16) /* Register address */ +#define OWL_EMAC_OFF_MAC_CSR10_REGADD 16 +#define OWL_EMAC_MSK_MAC_CSR10_DATA GENMASK(15, 0) /* Register data */ + +/* General-purpose timer and interrupt mitigation control register */ +#define OWL_EMAC_REG_MAC_CSR11 0x0058 +#define OWL_EMAC_OFF_MAC_CSR11_TT 27 /* Transmit timer */ +#define OWL_EMAC_OFF_MAC_CSR11_NTP 24 /* No. of transmit packets */ +#define OWL_EMAC_OFF_MAC_CSR11_RT 20 /* Receive timer */ +#define OWL_EMAC_OFF_MAC_CSR11_NRP 17 /* No. 
of receive packets */ + +/* MAC address low/high registers */ +#define OWL_EMAC_REG_MAC_CSR16 0x0080 +#define OWL_EMAC_REG_MAC_CSR17 0x0088 + +/* Pause time & cache thresholds register */ +#define OWL_EMAC_REG_MAC_CSR18 0x0090 +#define OWL_EMAC_OFF_MAC_CSR18_CPTL 24 /* Cache pause threshold level */ +#define OWL_EMAC_OFF_MAC_CSR18_CRTL 16 /* Cache restart threshold level */ +#define OWL_EMAC_OFF_MAC_CSR18_PQT 0 /* Flow control pause quanta time */ + +/* FIFO pause & restart threshold register */ +#define OWL_EMAC_REG_MAC_CSR19 0x0098 +#define OWL_EMAC_OFF_MAC_CSR19_FPTL 16 /* FIFO pause threshold level */ +#define OWL_EMAC_OFF_MAC_CSR19_FRTL 0 /* FIFO restart threshold level */ + +/* Flow control setup & status register */ +#define OWL_EMAC_REG_MAC_CSR20 0x00A0 +#define OWL_EMAC_BIT_MAC_CSR20_FCE BIT(31) /* Flow Control Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_TUE BIT(30) /* Transmit Un-pause frames Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_TPE BIT(29) /* Transmit Pause frames Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_RPE BIT(28) /* Receive Pause frames Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_BPE BIT(27) /* Back pressure (half-duplex) Enable */ + +/* MII control register */ +#define OWL_EMAC_REG_MAC_CTRL 0x00B0 +#define OWL_EMAC_BIT_MAC_CTRL_RRSB BIT(8) /* RMII_REFCLK select bit */ +#define OWL_EMAC_OFF_MAC_CTRL_SSDC 4 /* SMII SYNC delay cycle */ +#define OWL_EMAC_BIT_MAC_CTRL_RCPS BIT(1) /* REF_CLK phase select */ +#define OWL_EMAC_BIT_MAC_CTRL_RSIS BIT(0) /* RMII/SMII interface select */ + +/* Receive descriptor status field */ +#define OWL_EMAC_BIT_RDES0_OWN BIT(31) /* Ownership bit */ +#define OWL_EMAC_BIT_RDES0_FF BIT(30) /* Filtering fail */ +#define OWL_EMAC_MSK_RDES0_FL GENMASK(29, 16) /* Frame length */ +#define OWL_EMAC_OFF_RDES0_FL 16 +#define OWL_EMAC_BIT_RDES0_ES BIT(15) /* Error summary */ +#define OWL_EMAC_BIT_RDES0_DE BIT(14) /* Descriptor error */ +#define OWL_EMAC_BIT_RDES0_RF BIT(11) /* Runt frame */ +#define OWL_EMAC_BIT_RDES0_MF BIT(10) /* Multicast frame */ +#define OWL_EMAC_BIT_RDES0_FS BIT(9) /* First descriptor */ +#define OWL_EMAC_BIT_RDES0_LS BIT(8) /* Last descriptor */ +#define OWL_EMAC_BIT_RDES0_TL BIT(7) /* Frame too long */ +#define OWL_EMAC_BIT_RDES0_CS BIT(6) /* Collision seen */ +#define OWL_EMAC_BIT_RDES0_FT BIT(5) /* Frame type */ +#define OWL_EMAC_BIT_RDES0_RE BIT(3) /* Report on MII error */ +#define OWL_EMAC_BIT_RDES0_DB BIT(2) /* Dribbling bit */ +#define OWL_EMAC_BIT_RDES0_CE BIT(1) /* CRC error */ +#define OWL_EMAC_BIT_RDES0_ZERO BIT(0) /* Legal frame length indicator */ + +/* Receive descriptor control and count field */ +#define OWL_EMAC_BIT_RDES1_RER BIT(25) /* Receive end of ring */ +#define OWL_EMAC_MSK_RDES1_RBS1 GENMASK(10, 0) /* Buffer 1 size */ + +/* Transmit descriptor status field */ +#define OWL_EMAC_BIT_TDES0_OWN BIT(31) /* Ownership bit */ +#define OWL_EMAC_BIT_TDES0_ES BIT(15) /* Error summary */ +#define OWL_EMAC_BIT_TDES0_LO BIT(11) /* Loss of carrier */ +#define OWL_EMAC_BIT_TDES0_NC BIT(10) /* No carrier */ +#define OWL_EMAC_BIT_TDES0_LC BIT(9) /* Late collision */ +#define OWL_EMAC_BIT_TDES0_EC BIT(8) /* Excessive collisions */ +#define OWL_EMAC_MSK_TDES0_CC GENMASK(6, 3) /* Collision count */ +#define OWL_EMAC_BIT_TDES0_UF BIT(1) /* Underflow error */ +#define OWL_EMAC_BIT_TDES0_DE BIT(0) /* Deferred */ + +/* Transmit descriptor control and count field */ +#define OWL_EMAC_BIT_TDES1_IC BIT(31) /* Interrupt on completion */ +#define OWL_EMAC_BIT_TDES1_LS BIT(30) /* Last descriptor */ +#define OWL_EMAC_BIT_TDES1_FS BIT(29) /* 
First descriptor */ +#define OWL_EMAC_BIT_TDES1_FT1 BIT(28) /* Filtering type */ +#define OWL_EMAC_BIT_TDES1_SET BIT(27) /* Setup packet */ +#define OWL_EMAC_BIT_TDES1_AC BIT(26) /* Add CRC disable */ +#define OWL_EMAC_BIT_TDES1_TER BIT(25) /* Transmit end of ring */ +#define OWL_EMAC_BIT_TDES1_DPD BIT(23) /* Disabled padding */ +#define OWL_EMAC_BIT_TDES1_FT0 BIT(22) /* Filtering type */ +#define OWL_EMAC_MSK_TDES1_TBS1 GENMASK(10, 0) /* Buffer 1 size */ + +static const char *const owl_emac_clk_names[] = { "eth", "rmii" }; +#define OWL_EMAC_NCLKS ARRAY_SIZE(owl_emac_clk_names) + +enum owl_emac_clk_map { + OWL_EMAC_CLK_ETH = 0, + OWL_EMAC_CLK_RMII +}; + +struct owl_emac_addr_list { + u8 addrs[OWL_EMAC_MAX_MULTICAST_ADDRS][ETH_ALEN]; + int count; +}; + +/* TX/RX descriptors */ +struct owl_emac_ring_desc { + u32 status; + u32 control; + u32 buf_addr; + u32 reserved; /* 2nd buffer address is not used */ +}; + +struct owl_emac_ring { + struct owl_emac_ring_desc *descs; + dma_addr_t descs_dma; + struct sk_buff **skbs; + dma_addr_t *skbs_dma; + unsigned int size; + unsigned int head; + unsigned int tail; +}; + +struct owl_emac_priv { + struct net_device *netdev; + void __iomem *base; + + struct clk_bulk_data clks[OWL_EMAC_NCLKS]; + struct reset_control *reset; + + struct owl_emac_ring rx_ring; + struct owl_emac_ring tx_ring; + + struct mii_bus *mii; + struct napi_struct napi; + + phy_interface_t phy_mode; + unsigned int link; + int speed; + int duplex; + int pause; + struct owl_emac_addr_list mcaddr_list; + + struct work_struct mac_reset_task; + + u32 msg_enable; /* Debug message level */ + spinlock_t lock; /* Sync concurrent ring access */ +}; + +#endif /* __OWL_EMAC_H__ */ diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 9c5891bbfe61..d77fafbc1530 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1449,10 +1449,10 @@ static int greth_of_probe(struct platform_device *ofdev) break; } if (i == 6) { - const u8 *addr; + u8 addr[ETH_ALEN]; - addr = of_get_mac_address(ofdev->dev.of_node); - if (!IS_ERR(addr)) { + err = of_get_mac_address(ofdev->dev.of_node, addr); + if (!err) { for (i = 0; i < 6; i++) macaddr[i] = (unsigned int) addr[i]; } else { diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 5ed80d9a6b9f..f99ae317c188 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -790,7 +790,6 @@ static int emac_probe(struct platform_device *pdev) struct emac_board_info *db; struct net_device *ndev; int ret = 0; - const char *mac_addr; ndev = alloc_etherdev(sizeof(struct emac_board_info)); if (!ndev) { @@ -853,12 +852,9 @@ static int emac_probe(struct platform_device *pdev) } /* Read MAC-address from DT */ - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - - /* Check if the MAC address is valid, if not get a random one */ - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(np, ndev->dev_addr); + if (ret) { + /* if the MAC address is invalid get a random one */ eth_hw_addr_random(ndev); dev_warn(&pdev->dev, "using random MAC address %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 907125abef2c..1c00d719e5d7 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1351,7 +1351,6 
@@ static int altera_tse_probe(struct platform_device *pdev) struct resource *control_port; struct resource *dma_res; struct altera_tse_private *priv; - const unsigned char *macaddr; void __iomem *descmap; const struct of_device_id *of_id = NULL; @@ -1525,10 +1524,8 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; /* get default MAC address from device tree */ - macaddr = of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(macaddr)) - ether_addr_copy(ndev->dev_addr, macaddr); - else + ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr); + if (ret) eth_hw_addr_random(ndev); /* get phy addr and create mdio */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 02087d443e73..764852ead1d6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -863,7 +863,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) if (unlikely(i == timeout)) { netdev_err(ena_dev->net_device, - "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n", mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off); ret = ENA_MMIO_READ_TIMEOUT; @@ -2396,7 +2396,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, if (key) { if (key_len != sizeof(hash_key->key)) { netdev_err(ena_dev->net_device, - "key len (%hu) doesn't equal the supported size (%zu)\n", + "key len (%u) doesn't equal the supported size (%zu)\n", key_len, sizeof(hash_key->key)); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 343caf41e709..73b03ce59412 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -124,7 +124,7 @@ struct ena_com_io_cq { /* holds the number of cdesc of the current packet */ u16 cur_rx_pkt_cdesc_count; - /* save the firt cdesc idx of the current packet */ + /* save the first cdesc idx of the current packet */ u16 cur_rx_pkt_cdesc_start_idx; u16 q_depth; diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index d6cc7aa612b7..2fe7ccee55b2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -251,10 +251,10 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { ena_stats = &ena_stats_tx_strings[j]; - snprintf(*data, ETH_GSTRING_LEN, - "queue_%u_%s_%s", i, - is_xdp ? "xdp_tx" : "tx", ena_stats->name); - (*data) += ETH_GSTRING_LEN; + ethtool_sprintf(data, + "queue_%u_%s_%s", i, + is_xdp ? 
"xdp_tx" : "tx", + ena_stats->name); } if (!is_xdp) { @@ -264,9 +264,9 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { ena_stats = &ena_stats_rx_strings[j]; - snprintf(*data, ETH_GSTRING_LEN, - "queue_%u_rx_%s", i, ena_stats->name); - (*data) += ETH_GSTRING_LEN; + ethtool_sprintf(data, + "queue_%u_rx_%s", i, + ena_stats->name); } } } @@ -280,9 +280,8 @@ static void ena_com_dev_strings(u8 **data) for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { ena_stats = &ena_stats_ena_com_strings[i]; - snprintf(*data, ETH_GSTRING_LEN, - "ena_admin_q_%s", ena_stats->name); - (*data) += ETH_GSTRING_LEN; + ethtool_sprintf(data, + "ena_admin_q_%s", ena_stats->name); } } @@ -295,15 +294,13 @@ static void ena_get_strings(struct ena_adapter *adapter, for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; - memcpy(data, ena_stats->name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, ena_stats->name); } if (eni_stats_needed) { for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { ena_stats = &ena_stats_eni_strings[i]; - memcpy(data, ena_stats->name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, ena_stats->name); } } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 102f2c91fdb8..881f88754bf6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -300,7 +300,7 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len); if (unlikely(rc)) - goto error_drop_packet; + return rc; ena_tx_ctx.ena_bufs = tx_info->bufs; ena_tx_ctx.push_header = push_hdr; @@ -330,8 +330,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, error_unmap_dma: ena_unmap_tx_buff(xdp_ring, tx_info); tx_info->xdpf = NULL; -error_drop_packet: - xdp_return_frame(xdpf); return rc; } @@ -339,8 +337,8 @@ static int ena_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct ena_adapter *adapter = netdev_priv(dev); - int qid, i, err, drops = 0; struct ena_ring *xdp_ring; + int qid, i, nxmit = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -360,12 +358,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n, spin_lock(&xdp_ring->xdp_tx_lock); for (i = 0; i < n; i++) { - err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0); - /* The descriptor is freed by ena_xdp_xmit_frame in case - * of an error. 
- */ - if (err) - drops++; + if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0)) + break; + nxmit++; } /* Ring doorbell to make device aware of the packets */ @@ -378,7 +373,7 @@ static int ena_xdp_xmit(struct net_device *dev, int n, spin_unlock(&xdp_ring->xdp_tx_lock); /* Return number of packets sent */ - return n - drops; + return nxmit; } static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) @@ -415,7 +410,9 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ spin_lock(&xdp_ring->xdp_tx_lock); - ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH); + if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, + XDP_XMIT_FLUSH)) + xdp_return_frame(xdpf); spin_unlock(&xdp_ring->xdp_tx_lock); xdp_stat = &rx_ring->rx_stats.xdp_tx; @@ -3978,7 +3975,7 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); - /* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */ + /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); if (unlikely(!max_num_io_queues)) { dev_err(&pdev->dev, "The device doesn't have io queues\n"); diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 960d483e8997..4a1220cc6f10 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -100,19 +100,19 @@ static int amd8111e_read_phy(struct amd8111e_priv *lp, { void __iomem *mmio = lp->mmio; unsigned int reg_val; - unsigned int repeat= REPEAT_CNT; + unsigned int repeat = REPEAT_CNT; reg_val = readl(mmio + PHY_ACCESS); while (reg_val & PHY_CMD_ACTIVE) - reg_val = readl( mmio + PHY_ACCESS ); + reg_val = readl(mmio + PHY_ACCESS); - writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) | - ((reg & 0x1f) << 16), mmio +PHY_ACCESS); - do{ + writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) | + ((reg & 0x1f) << 16), mmio + PHY_ACCESS); + do { reg_val = readl(mmio + PHY_ACCESS); udelay(30); /* It takes 30 us to read/write data */ } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); - if(reg_val & PHY_RD_ERR) + if (reg_val & PHY_RD_ERR) goto err_phy_read; *val = reg_val & 0xffff; @@ -133,17 +133,17 @@ static int amd8111e_write_phy(struct amd8111e_priv *lp, reg_val = readl(mmio + PHY_ACCESS); while (reg_val & PHY_CMD_ACTIVE) - reg_val = readl( mmio + PHY_ACCESS ); + reg_val = readl(mmio + PHY_ACCESS); - writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) | + writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) | ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS); - do{ + do { reg_val = readl(mmio + PHY_ACCESS); udelay(30); /* It takes 30 us to read/write the data */ } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); - if(reg_val & PHY_RD_ERR) + if (reg_val & PHY_RD_ERR) goto err_phy_write; return 0; @@ -159,7 +159,7 @@ static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num) struct amd8111e_priv *lp = netdev_priv(dev); unsigned int reg_val; - amd8111e_read_phy(lp,phy_id,reg_num,&reg_val); + amd8111e_read_phy(lp, phy_id, reg_num, &reg_val); return reg_val; } @@ -179,17 +179,17 @@ static void amd8111e_mdio_write(struct net_device *dev, static void amd8111e_set_ext_phy(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - u32 bmcr,advert,tmp; + u32 bmcr, advert, tmp; /*
Determine mii register values to set the speed */ advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); - switch (lp->ext_phy_option){ + switch (lp->ext_phy_option) { default: case SPEED_AUTONEG: /* advertise all values */ - tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL| - ADVERTISE_100HALF|ADVERTISE_100FULL) ; + tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); break; case SPEED10_HALF: tmp |= ADVERTISE_10HALF; @@ -224,20 +224,20 @@ static int amd8111e_free_skbs(struct net_device *dev) int i; /* Freeing transmit skbs */ - for(i = 0; i < NUM_TX_BUFFERS; i++){ - if(lp->tx_skbuff[i]){ + for (i = 0; i < NUM_TX_BUFFERS; i++) { + if (lp->tx_skbuff[i]) { dma_unmap_single(&lp->pci_dev->dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, DMA_TO_DEVICE); - dev_kfree_skb (lp->tx_skbuff[i]); + dev_kfree_skb(lp->tx_skbuff[i]); lp->tx_skbuff[i] = NULL; lp->tx_dma_addr[i] = 0; } } /* Freeing previously allocated receive buffers */ - for (i = 0; i < NUM_RX_BUFFERS; i++){ + for (i = 0; i < NUM_RX_BUFFERS; i++) { rx_skbuff = lp->rx_skbuff[i]; - if(rx_skbuff != NULL){ + if (rx_skbuff != NULL) { dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[i], lp->rx_buff_len - 2, DMA_FROM_DEVICE); @@ -258,13 +258,13 @@ static inline void amd8111e_set_rx_buff_len(struct net_device *dev) struct amd8111e_priv *lp = netdev_priv(dev); unsigned int mtu = dev->mtu; - if (mtu > ETH_DATA_LEN){ + if (mtu > ETH_DATA_LEN) { /* MTU + ethernet header + FCS * + optional VLAN tag + skb reserve space 2 */ lp->rx_buff_len = mtu + ETH_HLEN + 10; lp->options |= OPTION_JUMBO_ENABLE; - } else{ + } else { lp->rx_buff_len = PKT_BUFF_SZ; lp->options &= ~OPTION_JUMBO_ENABLE; } @@ -285,11 +285,11 @@ static int amd8111e_init_ring(struct net_device *dev) lp->tx_ring_idx = 0; - if(lp->opened) + if (lp->opened) /* Free previously allocated transmit and receive skbs */ amd8111e_free_skbs(dev); - else{ + else { /* allocate the tx and rx descriptors */ lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev, sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, @@ -312,12 +312,12 @@ static int amd8111e_init_ring(struct net_device *dev) lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len); if (!lp->rx_skbuff[i]) { - /* Release previos allocated skbs */ - for(--i; i >= 0 ;i--) - dev_kfree_skb(lp->rx_skbuff[i]); - goto err_free_rx_ring; + /* Release previos allocated skbs */ + for (--i; i >= 0; i--) + dev_kfree_skb(lp->rx_skbuff[i]); + goto err_free_rx_ring; } - skb_reserve(lp->rx_skbuff[i],2); + skb_reserve(lp->rx_skbuff[i], 2); } /* Initilaizing receive descriptors */ for (i = 0; i < NUM_RX_BUFFERS; i++) { @@ -375,40 +375,40 @@ static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod) case RX_INTR_COAL : timeout = coal_conf->rx_timeout; event_count = coal_conf->rx_event_count; - if( timeout > MAX_TIMEOUT || - event_count > MAX_EVENT_COUNT ) + if (timeout > MAX_TIMEOUT || + event_count > MAX_EVENT_COUNT) return -EINVAL; timeout = timeout * DELAY_TIMER_CONV; writel(VAL0|STINTEN, mmio+INTEN0); - writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout, - mmio+DLY_INT_A); + writel((u32)DLY_INT_A_R0 | (event_count << 16) | + timeout, mmio + DLY_INT_A); break; - case TX_INTR_COAL : + case TX_INTR_COAL: timeout = coal_conf->tx_timeout; event_count = coal_conf->tx_event_count; - if( timeout > MAX_TIMEOUT || - event_count > MAX_EVENT_COUNT ) + if (timeout > MAX_TIMEOUT || + event_count > MAX_EVENT_COUNT) return -EINVAL; timeout = timeout * 
DELAY_TIMER_CONV; - writel(VAL0|STINTEN,mmio+INTEN0); - writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout, - mmio+DLY_INT_B); + writel(VAL0 | STINTEN, mmio + INTEN0); + writel((u32)DLY_INT_B_T0 | (event_count << 16) | + timeout, mmio + DLY_INT_B); break; case DISABLE_COAL: - writel(0,mmio+STVAL); - writel(STINTEN, mmio+INTEN0); - writel(0, mmio +DLY_INT_B); - writel(0, mmio+DLY_INT_A); + writel(0, mmio + STVAL); + writel(STINTEN, mmio + INTEN0); + writel(0, mmio + DLY_INT_B); + writel(0, mmio + DLY_INT_A); break; case ENABLE_COAL: /* Start the timer */ - writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */ - writel(VAL0|STINTEN, mmio+INTEN0); + writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */ + writel(VAL0 | STINTEN, mmio + INTEN0); break; default: break; @@ -423,67 +423,67 @@ static int amd8111e_restart(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); void __iomem *mmio = lp->mmio; - int i,reg_val; + int i, reg_val; /* stop the chip */ writel(RUN, mmio + CMD0); - if(amd8111e_init_ring(dev)) + if (amd8111e_init_ring(dev)) return -ENOMEM; /* enable the port manager and set auto negotiation always */ - writel((u32) VAL1|EN_PMGR, mmio + CMD3 ); - writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); + writel((u32)VAL1 | EN_PMGR, mmio + CMD3); + writel((u32)XPHYANE | XPHYRST, mmio + CTRL2); amd8111e_set_ext_phy(dev); /* set control registers */ reg_val = readl(mmio + CTRL1); reg_val &= ~XMTSP_MASK; - writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 ); + writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1); /* enable interrupt */ - writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | + writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN | SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0); writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0); /* initialize tx and rx ring base addresses */ - writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0); - writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0); + writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0); + writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0); writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); /* set default IPG to 96 */ - writew((u32)DEFAULT_IPG,mmio+IPG); + writew((u32)DEFAULT_IPG, mmio + IPG); writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); - if(lp->options & OPTION_JUMBO_ENABLE){ + if (lp->options & OPTION_JUMBO_ENABLE) { writel((u32)VAL2|JUMBO, mmio + CMD3); /* Reset REX_UFLO */ - writel( REX_UFLO, mmio + CMD2); + writel(REX_UFLO, mmio + CMD2); /* Should not set REX_UFLO for jumbo frames */ - writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2); - }else{ - writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); + writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2); + } else { + writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2); writel((u32)JUMBO, mmio + CMD3); } #if AMD8111E_VLAN_TAG_USED - writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); + writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3); #endif - writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); + writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2); /* Setting the MAC address to the device */ for (i = 0; i < ETH_ALEN; i++) - writeb( dev->dev_addr[i], mmio + PADR + i ); + writeb(dev->dev_addr[i], mmio + PADR + i); /* Enable interrupt coalesce */ - if(lp->options & OPTION_INTR_COAL_ENABLE){ + if (lp->options & OPTION_INTR_COAL_ENABLE) { 
netdev_info(dev, "Interrupt Coalescing Enabled.\n"); - amd8111e_set_coalesce(dev,ENABLE_COAL); + amd8111e_set_coalesce(dev, ENABLE_COAL); } /* set RUN bit to start the chip */ @@ -499,11 +499,11 @@ static int amd8111e_restart(struct net_device *dev) static void amd8111e_init_hw_default(struct amd8111e_priv *lp) { unsigned int reg_val; - unsigned int logic_filter[2] ={0,}; + unsigned int logic_filter[2] = {0,}; void __iomem *mmio = lp->mmio; - /* stop the chip */ + /* stop the chip */ writel(RUN, mmio + CMD0); /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */ @@ -519,13 +519,13 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp) writel(0, mmio + XMT_RING_BASE_ADDR3); /* Clear CMD0 */ - writel(CMD0_CLEAR,mmio + CMD0); + writel(CMD0_CLEAR, mmio + CMD0); /* Clear CMD2 */ - writel(CMD2_CLEAR, mmio +CMD2); + writel(CMD2_CLEAR, mmio + CMD2); /* Clear CMD7 */ - writel(CMD7_CLEAR , mmio + CMD7); + writel(CMD7_CLEAR, mmio + CMD7); /* Clear DLY_INT_A and DLY_INT_B */ writel(0x0, mmio + DLY_INT_A); @@ -542,16 +542,16 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp) writel(0x0, mmio + STVAL); /* Clear INTEN0 */ - writel( INTEN0_CLEAR, mmio + INTEN0); + writel(INTEN0_CLEAR, mmio + INTEN0); /* Clear LADRF */ - writel(0x0 , mmio + LADRF); + writel(0x0, mmio + LADRF); /* Set SRAM_SIZE & SRAM_BOUNDARY registers */ - writel( 0x80010,mmio + SRAM_SIZE); + writel(0x80010, mmio + SRAM_SIZE); /* Clear RCV_RING0_LEN */ - writel(0x0, mmio + RCV_RING_LEN0); + writel(0x0, mmio + RCV_RING_LEN0); /* Clear XMT_RING0/1/2/3_LEN */ writel(0x0, mmio + XMT_RING_LEN0); @@ -571,10 +571,10 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp) /* SRAM_SIZE register */ reg_val = readl(mmio + SRAM_SIZE); - if(lp->options & OPTION_JUMBO_ENABLE) - writel( VAL2|JUMBO, mmio + CMD3); + if (lp->options & OPTION_JUMBO_ENABLE) + writel(VAL2 | JUMBO, mmio + CMD3); #if AMD8111E_VLAN_TAG_USED - writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 ); + writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3); #endif /* Set default value to CTRL1 Register */ writel(CTRL1_DEFAULT, mmio + CTRL1); @@ -616,14 +616,14 @@ static void amd8111e_stop_chip(struct amd8111e_priv *lp) static void amd8111e_free_ring(struct amd8111e_priv *lp) { /* Free transmit and receive descriptor rings */ - if(lp->rx_ring){ + if (lp->rx_ring) { dma_free_coherent(&lp->pci_dev->dev, sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR, lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring = NULL; } - if(lp->tx_ring){ + if (lp->tx_ring) { dma_free_coherent(&lp->pci_dev->dev, sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, lp->tx_ring, lp->tx_ring_dma_addr); @@ -643,11 +643,11 @@ static int amd8111e_tx(struct net_device *dev) int tx_index; int status; /* Complete all the transmit packet */ - while (lp->tx_complete_idx != lp->tx_idx){ + while (lp->tx_complete_idx != lp->tx_idx) { tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags); - if(status & OWN_BIT) + if (status & OWN_BIT) break; /* It still hasn't been Txed */ lp->tx_ring[tx_index].buff_phy_addr = 0; @@ -669,10 +669,10 @@ static int amd8111e_tx(struct net_device *dev) le16_to_cpu(lp->tx_ring[tx_index].buff_count); if (netif_queue_stopped(dev) && - lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ + lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) { /* The ring is no longer full, clear tbusy. 
*/ /* lp->tx_full = 0; */ - netif_wake_queue (dev); + netif_wake_queue(dev); } } return 0; @@ -685,7 +685,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) struct net_device *dev = lp->amd8111e_net_dev; int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; void __iomem *mmio = lp->mmio; - struct sk_buff *skb,*new_skb; + struct sk_buff *skb, *new_skb; int min_pkt_len, status; int num_rx_pkt = 0; short pkt_len; @@ -710,7 +710,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) goto err_next_pkt; } /* check for STP and ENP */ - if (!((status & STP_BIT) && (status & ENP_BIT))){ + if (!((status & STP_BIT) && (status & ENP_BIT))) { /* resetting flags */ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; goto err_next_pkt; @@ -755,7 +755,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) skb->protocol = eth_type_trans(skb, dev); #if AMD8111E_VLAN_TAG_USED - if (vtag == TT_VLAN_TAGGED){ + if (vtag == TT_VLAN_TAGGED) { u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } @@ -793,25 +793,25 @@ err_next_pkt: static int amd8111e_link_change(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - int status0,speed; + int status0, speed; /* read the link change */ - status0 = readl(lp->mmio + STAT0); + status0 = readl(lp->mmio + STAT0); - if(status0 & LINK_STATS){ - if(status0 & AUTONEG_COMPLETE) + if (status0 & LINK_STATS) { + if (status0 & AUTONEG_COMPLETE) lp->link_config.autoneg = AUTONEG_ENABLE; else lp->link_config.autoneg = AUTONEG_DISABLE; - if(status0 & FULL_DPLX) + if (status0 & FULL_DPLX) lp->link_config.duplex = DUPLEX_FULL; else lp->link_config.duplex = DUPLEX_HALF; speed = (status0 & SPEED_MASK) >> 7; - if(speed == PHY_SPEED_10) + if (speed == PHY_SPEED_10) lp->link_config.speed = SPEED_10; - else if(speed == PHY_SPEED_100) + else if (speed == PHY_SPEED_100) lp->link_config.speed = SPEED_100; netdev_info(dev, "Link is Up. 
Speed is %s Mbps %s Duplex\n", @@ -821,8 +821,7 @@ static int amd8111e_link_change(struct net_device *dev) "Full" : "Half"); netif_carrier_on(dev); - } - else{ + } else { lp->link_config.speed = SPEED_INVALID; lp->link_config.duplex = DUPLEX_INVALID; lp->link_config.autoneg = AUTONEG_INVALID; @@ -840,7 +839,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) unsigned int data; unsigned int repeat = REPEAT_CNT; - writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); + writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); do { status = readw(mmio + MIB_ADDR); udelay(2); /* controller takes MAX 2 us to get mib data */ @@ -863,7 +862,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) if (!lp->opened) return new_stats; - spin_lock_irqsave (&lp->lock, flags); + spin_lock_irqsave(&lp->lock, flags); /* stats.rx_packets */ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ @@ -943,7 +942,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) /* Reset the mibs for collecting new statistics */ /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/ - spin_unlock_irqrestore (&lp->lock, flags); + spin_unlock_irqrestore(&lp->lock, flags); return new_stats; } @@ -974,96 +973,90 @@ static int amd8111e_calc_coalesce(struct net_device *dev) rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes; coal_conf->rx_prev_bytes = coal_conf->rx_bytes; - if(rx_pkt_rate < 800){ - if(coal_conf->rx_coal_type != NO_COALESCE){ + if (rx_pkt_rate < 800) { + if (coal_conf->rx_coal_type != NO_COALESCE) { coal_conf->rx_timeout = 0x0; coal_conf->rx_event_count = 0; - amd8111e_set_coalesce(dev,RX_INTR_COAL); + amd8111e_set_coalesce(dev, RX_INTR_COAL); coal_conf->rx_coal_type = NO_COALESCE; } - } - else{ + } else { rx_pkt_size = rx_data_rate/rx_pkt_rate; - if (rx_pkt_size < 128){ - if(coal_conf->rx_coal_type != NO_COALESCE){ + if (rx_pkt_size < 128) { + if (coal_conf->rx_coal_type != NO_COALESCE) { coal_conf->rx_timeout = 0; coal_conf->rx_event_count = 0; - amd8111e_set_coalesce(dev,RX_INTR_COAL); + amd8111e_set_coalesce(dev, RX_INTR_COAL); coal_conf->rx_coal_type = NO_COALESCE; } - } - else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){ + } else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) { - if(coal_conf->rx_coal_type != LOW_COALESCE){ + if (coal_conf->rx_coal_type != LOW_COALESCE) { coal_conf->rx_timeout = 1; coal_conf->rx_event_count = 4; - amd8111e_set_coalesce(dev,RX_INTR_COAL); + amd8111e_set_coalesce(dev, RX_INTR_COAL); coal_conf->rx_coal_type = LOW_COALESCE; } - } - else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){ + } else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) { - if(coal_conf->rx_coal_type != MEDIUM_COALESCE){ + if (coal_conf->rx_coal_type != MEDIUM_COALESCE) { coal_conf->rx_timeout = 1; coal_conf->rx_event_count = 4; - amd8111e_set_coalesce(dev,RX_INTR_COAL); + amd8111e_set_coalesce(dev, RX_INTR_COAL); coal_conf->rx_coal_type = MEDIUM_COALESCE; } - } - else if(rx_pkt_size >= 1024){ - if(coal_conf->rx_coal_type != HIGH_COALESCE){ + } else if (rx_pkt_size >= 1024) { + + if (coal_conf->rx_coal_type != HIGH_COALESCE) { coal_conf->rx_timeout = 2; coal_conf->rx_event_count = 3; - amd8111e_set_coalesce(dev,RX_INTR_COAL); + amd8111e_set_coalesce(dev, RX_INTR_COAL); coal_conf->rx_coal_type = HIGH_COALESCE; } } } - /* NOW FOR TX INTR COALESC */ - if(tx_pkt_rate < 800){ - if(coal_conf->tx_coal_type != NO_COALESCE){ + /* NOW FOR TX INTR COALESC */ + if (tx_pkt_rate < 800) { + if (coal_conf->tx_coal_type != NO_COALESCE) { 
coal_conf->tx_timeout = 0x0; coal_conf->tx_event_count = 0; - amd8111e_set_coalesce(dev,TX_INTR_COAL); + amd8111e_set_coalesce(dev, TX_INTR_COAL); coal_conf->tx_coal_type = NO_COALESCE; } - } - else{ + } else { tx_pkt_size = tx_data_rate/tx_pkt_rate; - if (tx_pkt_size < 128){ + if (tx_pkt_size < 128) { - if(coal_conf->tx_coal_type != NO_COALESCE){ + if (coal_conf->tx_coal_type != NO_COALESCE) { coal_conf->tx_timeout = 0; coal_conf->tx_event_count = 0; - amd8111e_set_coalesce(dev,TX_INTR_COAL); + amd8111e_set_coalesce(dev, TX_INTR_COAL); coal_conf->tx_coal_type = NO_COALESCE; } - } - else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){ + } else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) { - if(coal_conf->tx_coal_type != LOW_COALESCE){ + if (coal_conf->tx_coal_type != LOW_COALESCE) { coal_conf->tx_timeout = 1; coal_conf->tx_event_count = 2; - amd8111e_set_coalesce(dev,TX_INTR_COAL); + amd8111e_set_coalesce(dev, TX_INTR_COAL); coal_conf->tx_coal_type = LOW_COALESCE; } - } - else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){ + } else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) { - if(coal_conf->tx_coal_type != MEDIUM_COALESCE){ + if (coal_conf->tx_coal_type != MEDIUM_COALESCE) { coal_conf->tx_timeout = 2; coal_conf->tx_event_count = 5; - amd8111e_set_coalesce(dev,TX_INTR_COAL); + amd8111e_set_coalesce(dev, TX_INTR_COAL); coal_conf->tx_coal_type = MEDIUM_COALESCE; } } else if (tx_pkt_size >= 1024) { @@ -1091,7 +1084,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) unsigned int intr0, intren0; unsigned int handled = 1; - if(unlikely(dev == NULL)) + if (unlikely(dev == NULL)) return IRQ_NONE; spin_lock(&lp->lock); @@ -1105,7 +1098,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) /* Process all the INT event until INTR bit is clear. */ - if (!(intr0 & INTR)){ + if (!(intr0 & INTR)) { handled = 0; goto err_no_interrupt; } @@ -1140,7 +1133,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) amd8111e_calc_coalesce(dev); err_no_interrupt: - writel( VAL0 | INTREN,mmio + CMD0); + writel(VAL0 | INTREN, mmio + CMD0); spin_unlock(&lp->lock); @@ -1180,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev) netif_carrier_off(lp->amd8111e_net_dev); /* Delete ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE) + if (lp->options & OPTION_DYN_IPG_ENABLE) del_timer_sync(&lp->ipg_data.ipg_timer); spin_unlock_irq(&lp->lock); @@ -1200,8 +1193,8 @@ static int amd8111e_open(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED, - dev->name, dev)) + if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt, + IRQF_SHARED, dev->name, dev)) return -EAGAIN; napi_enable(&lp->napi); @@ -1210,7 +1203,7 @@ static int amd8111e_open(struct net_device *dev) amd8111e_init_hw_default(lp); - if(amd8111e_restart(dev)){ + if (amd8111e_restart(dev)) { spin_unlock_irq(&lp->lock); napi_disable(&lp->napi); if (dev->irq) @@ -1218,7 +1211,7 @@ static int amd8111e_open(struct net_device *dev) return -ENOMEM; } /* Start ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE){ + if (lp->options & OPTION_DYN_IPG_ENABLE) { add_timer(&lp->ipg_data.ipg_timer); netdev_info(dev, "Dynamic IPG Enabled\n"); } @@ -1289,10 +1282,10 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, lp->tx_idx++; /* Trigger an immediate send poll. 
*/ - writel( VAL1 | TDMD0, lp->mmio + CMD0); - writel( VAL2 | RDMD0,lp->mmio + CMD0); + writel(VAL1 | TDMD0, lp->mmio + CMD0); + writel(VAL2 | RDMD0, lp->mmio + CMD0); - if(amd8111e_tx_queue_avail(lp) < 0){ + if (amd8111e_tx_queue_avail(lp) < 0) { netif_stop_queue(dev); } spin_unlock_irqrestore(&lp->lock, flags); @@ -1326,15 +1319,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev) { struct netdev_hw_addr *ha; struct amd8111e_priv *lp = netdev_priv(dev); - u32 mc_filter[2] ; + u32 mc_filter[2]; int bit_num; - if(dev->flags & IFF_PROMISC){ - writel( VAL2 | PROM, lp->mmio + CMD2); + if (dev->flags & IFF_PROMISC) { + writel(VAL2 | PROM, lp->mmio + CMD2); return; } else - writel( PROM, lp->mmio + CMD2); + writel(PROM, lp->mmio + CMD2); if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > MAX_FILTER_SIZE) { /* get all multicast packet */ @@ -1439,7 +1432,7 @@ static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_ if (wol_info->wolopts & WAKE_MAGIC) lp->options |= (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE); - else if(wol_info->wolopts & WAKE_PHY) + else if (wol_info->wolopts & WAKE_PHY) lp->options |= (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE); else @@ -1464,14 +1457,14 @@ static const struct ethtool_ops ops = { * gets/sets driver speed, gets memory mapped register values, forces * auto negotiation, sets/gets WOL options for ethtool application. */ -static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) +static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct amd8111e_priv *lp = netdev_priv(dev); int err; u32 mii_regval; - switch(cmd) { + switch (cmd) { case SIOCGMIIPHY: data->phy_id = lp->ext_phy_addr; @@ -1511,7 +1504,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) spin_lock_irq(&lp->lock); /* Setting the MAC address to the device */ for (i = 0; i < ETH_ALEN; i++) - writeb( dev->dev_addr[i], lp->mmio + PADR + i ); + writeb(dev->dev_addr[i], lp->mmio + PADR + i); spin_unlock_irq(&lp->lock); @@ -1536,22 +1529,22 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) spin_lock_irq(&lp->lock); - /* stop the chip */ + /* stop the chip */ writel(RUN, lp->mmio + CMD0); dev->mtu = new_mtu; err = amd8111e_restart(dev); spin_unlock_irq(&lp->lock); - if(!err) + if (!err) netif_start_queue(dev); return err; } static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp) { - writel( VAL1|MPPLBA, lp->mmio + CMD3); - writel( VAL0|MPEN_SW, lp->mmio + CMD7); + writel(VAL1 | MPPLBA, lp->mmio + CMD3); + writel(VAL0 | MPEN_SW, lp->mmio + CMD7); /* To eliminate PCI posting bug */ readl(lp->mmio + CMD7); @@ -1562,7 +1555,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp) { /* Adapter is already stoped/suspended/interrupt-disabled */ - writel(VAL0|LCMODE_SW,lp->mmio + CMD7); + writel(VAL0 | LCMODE_SW, lp->mmio + CMD7); /* To eliminate PCI posting bug */ readl(lp->mmio + CMD7); @@ -1584,7 +1577,7 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue) spin_lock_irq(&lp->lock); err = amd8111e_restart(dev); spin_unlock_irq(&lp->lock); - if(!err) + if (!err) netif_wake_queue(dev); } @@ -1605,22 +1598,21 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d) /* stop chip */ spin_lock_irq(&lp->lock); - if(lp->options & OPTION_DYN_IPG_ENABLE) + if (lp->options & OPTION_DYN_IPG_ENABLE) del_timer_sync(&lp->ipg_data.ipg_timer); amd8111e_stop_chip(lp); spin_unlock_irq(&lp->lock); - 
if(lp->options & OPTION_WOL_ENABLE){ + if (lp->options & OPTION_WOL_ENABLE) { /* enable wol */ - if(lp->options & OPTION_WAKE_MAGIC_ENABLE) + if (lp->options & OPTION_WAKE_MAGIC_ENABLE) amd8111e_enable_magicpkt(lp); - if(lp->options & OPTION_WAKE_PHY_ENABLE) + if (lp->options & OPTION_WAKE_PHY_ENABLE) amd8111e_enable_link_change(lp); device_set_wakeup_enable(dev_d, 1); - } - else{ + } else { device_set_wakeup_enable(dev_d, 0); } @@ -1640,7 +1632,7 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d) spin_lock_irq(&lp->lock); amd8111e_restart(dev); /* Restart ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE) + if (lp->options & OPTION_DYN_IPG_ENABLE) mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES); spin_unlock_irq(&lp->lock); @@ -1657,14 +1649,14 @@ static void amd8111e_config_ipg(struct timer_list *t) unsigned int total_col_cnt; unsigned int tmp_ipg; - if(lp->link_config.duplex == DUPLEX_FULL){ + if (lp->link_config.duplex == DUPLEX_FULL) { ipg_data->ipg = DEFAULT_IPG; return; } - if(ipg_data->ipg_state == SSTATE){ + if (ipg_data->ipg_state == SSTATE) { - if(ipg_data->timer_tick == IPG_STABLE_TIME){ + if (ipg_data->timer_tick == IPG_STABLE_TIME) { ipg_data->timer_tick = 0; ipg_data->ipg = MIN_IPG - IPG_STEP; @@ -1676,7 +1668,7 @@ static void amd8111e_config_ipg(struct timer_list *t) ipg_data->timer_tick++; } - if(ipg_data->ipg_state == CSTATE){ + if (ipg_data->ipg_state == CSTATE) { /* Get the current collision count */ @@ -1684,10 +1676,10 @@ static void amd8111e_config_ipg(struct timer_list *t) amd8111e_read_mib(mmio, xmt_collisions); if ((total_col_cnt - prev_col_cnt) < - (ipg_data->diff_col_cnt)){ + (ipg_data->diff_col_cnt)) { ipg_data->diff_col_cnt = - total_col_cnt - prev_col_cnt ; + total_col_cnt - prev_col_cnt; ipg_data->ipg = ipg_data->current_ipg; } @@ -1696,7 +1688,7 @@ static void amd8111e_config_ipg(struct timer_list *t) if (ipg_data->current_ipg <= MAX_IPG) tmp_ipg = ipg_data->current_ipg; - else{ + else { tmp_ipg = ipg_data->ipg; ipg_data->ipg_state = SSTATE; } @@ -1748,24 +1740,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err, i; - unsigned long reg_addr,reg_len; + unsigned long reg_addr, reg_len; struct amd8111e_priv *lp; struct net_device *dev; err = pci_enable_device(pdev); - if(err){ + if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); return err; } - if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Cannot find PCI base address\n"); err = -ENODEV; goto err_disable_pdev; } err = pci_request_regions(pdev, MODULE_NAME); - if(err){ + if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); goto err_disable_pdev; } @@ -1798,7 +1790,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); #if AMD8111E_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; #endif lp = netdev_priv(dev); @@ -1821,16 +1813,16 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* Setting user defined parametrs */ lp->ext_phy_option = speed_duplex[card_idx]; - if(coalesce[card_idx]) + if (coalesce[card_idx]) lp->options |= OPTION_INTR_COAL_ENABLE; - if(dynamic_ipg[card_idx++]) + if (dynamic_ipg[card_idx++]) lp->options |= OPTION_DYN_IPG_ENABLE; /* Initialize driver entry points */ dev->netdev_ops = &amd8111e_netdev_ops; dev->ethtool_ops = &ops; - dev->irq =pdev->irq; + 
dev->irq = pdev->irq; dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; dev->min_mtu = AMD8111E_MIN_MTU; dev->max_mtu = AMD8111E_MAX_MTU; @@ -1861,7 +1853,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); /* Initialize software ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE){ + if (lp->options & OPTION_DYN_IPG_ENABLE) { timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0); lp->ipg_data.ipg_timer.expires = jiffies + IPG_CONVERGE_JIFFIES; @@ -1870,7 +1862,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, } /* display driver and device information */ - chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; + chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28; dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", chip_version, dev->dev_addr); if (lp->ext_phy_id) @@ -1879,7 +1871,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, else dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n"); - return 0; + return 0; err_free_dev: free_netdev(dev); @@ -1919,7 +1911,7 @@ MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume); static struct pci_driver amd8111e_driver = { - .name = MODULE_NAME, + .name = MODULE_NAME, .id_table = amd8111e_pci_tbl, .probe = amd8111e_probe_one, .remove = amd8111e_remove_one, diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c index e10aceb2b767..6784f8748638 100644 --- a/drivers/net/ethernet/amd/hplance.c +++ b/drivers/net/ethernet/amd/hplance.c @@ -170,6 +170,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d) static void hplance_writerap(void *priv, unsigned short value) { struct lance_private *lp = (struct lance_private *)priv; + do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); @@ -178,6 +179,7 @@ static void hplance_writerap(void *priv, unsigned short value) static void hplance_writerdp(void *priv, unsigned short value) { struct lance_private *lp = (struct lance_private *)priv; + do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); @@ -187,6 +189,7 @@ static unsigned short hplance_readrdp(void *priv) { struct lance_private *lp = (struct lance_private *)priv; __u16 value; + do { value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index f78daba60b35..aa412506832d 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -2853,8 +2853,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) netif_info(lp, link, dev, "link down\n"); } if (lp->phycount > 1) { - curr_link = pcnet32_check_otherphy(dev); - prev_link = 0; + pcnet32_check_otherphy(dev); } } else if (verbose || !prev_link) { netif_carrier_on(dev); diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b56a9e2aecd9..67b8113a2b53 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -857,7 +857,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) struct device_node *phy_node; struct phy_device *phydev = NULL; struct arc_emac_priv *priv; - const char *mac_addr; unsigned int id, clock_frequency, irq; int err; @@ -942,11 +941,8 @@ int arc_emac_probe(struct net_device *ndev, int 
interface) } /* Get MAC address from device tree */ - mac_addr = of_get_mac_address(dev->of_node); - - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - else + err = of_get_mac_address(dev->of_node, ndev->dev_addr); + if (err) eth_hw_addr_random(ndev); arc_emac_set_address_internal(ndev); diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index fb803bf92ded..482c58c4c584 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -21,6 +21,7 @@ config AG71XX tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support" depends on ATH79 select PHYLINK + imply NET_SELFTESTS help If you wish to compile a kernel for AR7XXX/91XXX and enable ethernet support, then you should always answer Y to this. diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index a60ce9030581..1ba81b1eb6fd 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -37,6 +37,7 @@ #include <linux/reset.h> #include <linux/clk.h> #include <linux/io.h> +#include <net/selftests.h> /* For our NAPI weight bigger does *NOT* mean better - it means more * D-cache misses and lots more wasted cycles than we'll ever @@ -497,12 +498,17 @@ static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev, static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { - if (sset == ETH_SS_STATS) { - int i; + int i; + switch (sset) { + case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, ag71xx_statistics[i].name, ETH_GSTRING_LEN); + break; + case ETH_SS_TEST: + net_selftest_get_strings(data); + break; } } @@ -519,9 +525,14 @@ static void ag71xx_ethtool_get_stats(struct net_device *ndev, static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset) { - if (sset == ETH_SS_STATS) + switch (sset) { + case ETH_SS_STATS: return ARRAY_SIZE(ag71xx_statistics); - return -EOPNOTSUPP; + case ETH_SS_TEST: + return net_selftest_get_count(); + default: + return -EOPNOTSUPP; + } } static const struct ethtool_ops ag71xx_ethtool_ops = { @@ -536,6 +547,7 @@ static const struct ethtool_ops ag71xx_ethtool_ops = { .get_strings = ag71xx_ethtool_get_strings, .get_ethtool_stats = ag71xx_ethtool_get_stats, .get_sset_count = ag71xx_ethtool_get_sset_count, + .self_test = net_selftest, }; static int ag71xx_mdio_wait_busy(struct ag71xx *ag) @@ -1658,9 +1670,9 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) struct net_device *ndev = ag->ndev; int ring_mask, ring_size, done = 0; unsigned int pktlen_mask, offset; - struct sk_buff *next, *skb; struct ag71xx_ring *ring; struct list_head rx_list; + struct sk_buff *skb; ring = &ag->rx_ring; pktlen_mask = ag->dcfg->desc_pktlen_mask; @@ -1725,7 +1737,7 @@ next: ag71xx_ring_rx_refill(ag); - list_for_each_entry_safe(skb, next, &rx_list, list) + list_for_each_entry(skb, &rx_list, list) skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb_list(&rx_list); @@ -1856,7 +1868,6 @@ static int ag71xx_probe(struct platform_device *pdev) const struct ag71xx_dcfg *dcfg; struct net_device *ndev; struct resource *res; - const void *mac_addr; int tx_size, err, i; struct ag71xx *ag; @@ -1957,10 +1968,8 @@ static int ag71xx_probe(struct platform_device *pdev) ag->stop_desc->ctrl = 0; ag->stop_desc->next = (u32)ag->stop_desc_dma; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); - if (IS_ERR(mac_addr) || 
!is_valid_ether_addr(ndev->dev_addr)) { + err = of_get_mac_address(np, ndev->dev_addr); + if (err) { netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); eth_random_addr(ndev->dev_addr); } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index a0562a90fb6d..28ae5c16831e 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -367,6 +367,7 @@ struct atl1c_hw { u16 phy_id1; u16 phy_id2; + spinlock_t intr_mask_lock; /* protect the intr_mask */ u32 intr_mask; u8 preamble_len; @@ -506,6 +507,7 @@ struct atl1c_adapter { struct net_device *netdev; struct pci_dev *pdev; struct napi_struct napi; + struct napi_struct tx_napi; struct page *rx_page; unsigned int rx_page_offset; unsigned int rx_frag_size; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3f65f2b370c5..1d17c24e6d75 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -47,7 +47,7 @@ static void atl1c_down(struct atl1c_adapter *adapter); static int atl1c_reset_mac(struct atl1c_hw *hw); static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); static int atl1c_configure(struct atl1c_adapter *adapter); -static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode); static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | @@ -470,7 +470,7 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; - head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + + head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); adapter->rx_frag_size = roundup_pow_of_two(head_size); } @@ -813,6 +813,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) atl1c_set_rxbufsize(adapter, adapter->netdev); atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->hw.intr_mask_lock); set_bit(__AT_DOWN, &adapter->flags); return 0; @@ -1434,7 +1435,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter) atl1c_set_multi(netdev); atl1c_restore_vlan(adapter); - num = atl1c_alloc_rx_buffer(adapter); + num = atl1c_alloc_rx_buffer(adapter, false); if (unlikely(num == 0)) return -ENOMEM; @@ -1530,20 +1531,19 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) spin_unlock(&adapter->mdio_lock); } -static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, - enum atl1c_trans_queue type) +static int atl1c_clean_tx(struct napi_struct *napi, int budget) { - struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_adapter *adapter = + container_of(napi, struct atl1c_adapter, tx_napi); + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[atl1c_trans_normal]; struct atl1c_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 hw_next_to_clean; - u16 reg; unsigned int total_bytes = 0, total_packets = 0; + unsigned long flags; - reg = type == atl1c_trans_high ? 
REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; - - AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean); + AT_READ_REGW(&adapter->hw, REG_TPD_PRI0_CIDX, &hw_next_to_clean); while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; @@ -1564,7 +1564,15 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, netif_wake_queue(adapter->netdev); } - return true; + if (total_packets < budget) { + napi_complete_done(napi, total_packets); + spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); + adapter->hw.intr_mask |= ISR_TX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); + return total_packets; + } + return budget; } /** @@ -1599,13 +1607,22 @@ static irqreturn_t atl1c_intr(int irq, void *data) AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); if (status & ISR_RX_PKT) { if (likely(napi_schedule_prep(&adapter->napi))) { + spin_lock(&hw->intr_mask_lock); hw->intr_mask &= ~ISR_RX_PKT; AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + spin_unlock(&hw->intr_mask_lock); __napi_schedule(&adapter->napi); } } - if (status & ISR_TX_PKT) - atl1c_clean_tx_irq(adapter, atl1c_trans_normal); + if (status & ISR_TX_PKT) { + if (napi_schedule_prep(&adapter->tx_napi)) { + spin_lock(&hw->intr_mask_lock); + hw->intr_mask &= ~ISR_TX_PKT; + AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + spin_unlock(&hw->intr_mask_lock); + __napi_schedule(&adapter->tx_napi); + } + } handled = IRQ_HANDLED; /* check if PCIE PHY Link down */ @@ -1650,14 +1667,20 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, skb_checksum_none_assert(skb); } -static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) +static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter, + bool napi_mode) { struct sk_buff *skb; struct page *page; - if (adapter->rx_frag_size > PAGE_SIZE) - return netdev_alloc_skb(adapter->netdev, - adapter->rx_buffer_len); + if (adapter->rx_frag_size > PAGE_SIZE) { + if (likely(napi_mode)) + return napi_alloc_skb(&adapter->napi, + adapter->rx_buffer_len); + else + return netdev_alloc_skb_ip_align(adapter->netdev, + adapter->rx_buffer_len); + } page = adapter->rx_page; if (!page) { @@ -1670,7 +1693,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) skb = build_skb(page_address(page) + adapter->rx_page_offset, adapter->rx_frag_size); if (likely(skb)) { - skb_reserve(skb, NET_SKB_PAD); + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); adapter->rx_page_offset += adapter->rx_frag_size; if (adapter->rx_page_offset >= PAGE_SIZE) adapter->rx_page = NULL; @@ -1680,7 +1703,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) return skb; } -static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; struct pci_dev *pdev = adapter->pdev; @@ -1701,7 +1724,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) while (next_info->flags & ATL1C_BUFFER_FREE) { rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); - skb = atl1c_alloc_skb(adapter); + skb = atl1c_alloc_skb(adapter, napi_mode); if (unlikely(!skb)) { if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "alloc rx buffer failed\n"); @@ -1851,13 +1874,13 @@ rrs_checked: vlan = le16_to_cpu(vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); } - netif_receive_skb(skb); + napi_gro_receive(&adapter->napi, skb); (*work_done)++; count++; } if 
(count) - atl1c_alloc_rx_buffer(adapter); + atl1c_alloc_rx_buffer(adapter, true); } /** @@ -1870,6 +1893,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) struct atl1c_adapter *adapter = container_of(napi, struct atl1c_adapter, napi); int work_done = 0; + unsigned long flags; /* Keep link state information with original netdev */ if (!netif_carrier_ok(adapter->netdev)) @@ -1880,8 +1904,10 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (work_done < budget) { quit_polling: napi_complete_done(napi, work_done); + spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); adapter->hw.intr_mask |= ISR_RX_PKT; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); } return work_done; } @@ -2319,6 +2345,7 @@ static int atl1c_up(struct atl1c_adapter *adapter) atl1c_check_link_status(adapter); clear_bit(__AT_DOWN, &adapter->flags); napi_enable(&adapter->napi); + napi_enable(&adapter->tx_napi); atl1c_irq_enable(adapter); netif_start_queue(netdev); return err; @@ -2339,6 +2366,7 @@ static void atl1c_down(struct atl1c_adapter *adapter) set_bit(__AT_DOWN, &adapter->flags); netif_carrier_off(netdev); napi_disable(&adapter->napi); + napi_disable(&adapter->tx_napi); atl1c_irq_disable(adapter); atl1c_free_irq(adapter); /* disable ASPM if device inactive */ @@ -2587,7 +2615,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->mii.mdio_write = atl1c_mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; + dev_set_threaded(netdev, true); netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); + netif_napi_add(netdev, &adapter->tx_napi, atl1c_clean_tx, 64); timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0); /* setup the private structure */ err = atl1c_sw_init(adapter); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index f016f2e12ee7..0cc0db04c27d 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1675,29 +1675,7 @@ static struct pci_driver atl2_driver = { .shutdown = atl2_shutdown, }; -/** - * atl2_init_module - Driver Registration Routine - * - * atl2_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl2_init_module(void) -{ - return pci_register_driver(&atl2_driver); -} -module_init(atl2_init_module); - -/** - * atl2_exit_module - Driver Exit Cleanup Routine - * - * atl2_exit_module is called just before the driver is removed - * from memory. 
- */ -static void __exit atl2_exit_module(void) -{ - pci_unregister_driver(&atl2_driver); -} -module_exit(atl2_exit_module); +module_pci_driver(atl2_driver); static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) { diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 65981931a798..60d908507f51 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_net.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/string.h> @@ -53,6 +54,7 @@ struct bcm4908_enet_dma_ring { int length; u16 cfg_block; u16 st_ram_block; + struct napi_struct napi; union { void *cpu_addr; @@ -66,8 +68,8 @@ struct bcm4908_enet_dma_ring { struct bcm4908_enet { struct device *dev; struct net_device *netdev; - struct napi_struct napi; void __iomem *base; + int irq_tx; struct bcm4908_enet_dma_ring tx_ring; struct bcm4908_enet_dma_ring rx_ring; @@ -122,24 +124,31 @@ static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set) * Helpers */ -static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet) +static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu) { - enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS); + enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD); } -static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet) +/*** + * DMA ring ops + */ + +static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) { - enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0); + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS); } -static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet) +static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) { - enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS); + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0); } -static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu) +static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) { - enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD); + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS); } /*** @@ -414,11 +423,14 @@ static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet) static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id) { struct bcm4908_enet *enet = dev_id; + struct bcm4908_enet_dma_ring *ring; - bcm4908_enet_intrs_off(enet); - bcm4908_enet_intrs_ack(enet); + ring = (irq == enet->irq_tx) ? 
&enet->tx_ring : &enet->rx_ring; - napi_schedule(&enet->napi); + bcm4908_enet_dma_ring_intrs_off(enet, ring); + bcm4908_enet_dma_ring_intrs_ack(enet, ring); + + napi_schedule(&ring->napi); return IRQ_HANDLED; } @@ -426,6 +438,8 @@ static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id) static int bcm4908_enet_open(struct net_device *netdev) { struct bcm4908_enet *enet = netdev_priv(netdev); + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; struct device *dev = enet->dev; int err; @@ -435,6 +449,17 @@ static int bcm4908_enet_open(struct net_device *netdev) return err; } + if (enet->irq_tx > 0) { + err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0, + "tx", enet); + if (err) { + dev_err(dev, "Failed to request IRQ %d: %d\n", + enet->irq_tx, err); + free_irq(netdev->irq, enet); + return err; + } + } + bcm4908_enet_gmac_init(enet); bcm4908_enet_dma_reset(enet); bcm4908_enet_dma_init(enet); @@ -443,14 +468,19 @@ static int bcm4908_enet_open(struct net_device *netdev) enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN); enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0); - bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring); - napi_enable(&enet->napi); + if (enet->irq_tx > 0) { + napi_enable(&tx_ring->napi); + bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring); + bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); + } + + bcm4908_enet_dma_rx_ring_enable(enet, rx_ring); + napi_enable(&rx_ring->napi); netif_carrier_on(netdev); netif_start_queue(netdev); - - bcm4908_enet_intrs_ack(enet); - bcm4908_enet_intrs_on(enet); + bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring); + bcm4908_enet_dma_ring_intrs_on(enet, rx_ring); return 0; } @@ -458,16 +488,20 @@ static int bcm4908_enet_open(struct net_device *netdev) static int bcm4908_enet_stop(struct net_device *netdev) { struct bcm4908_enet *enet = netdev_priv(netdev); + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; netif_stop_queue(netdev); netif_carrier_off(netdev); - napi_disable(&enet->napi); + napi_disable(&rx_ring->napi); + napi_disable(&tx_ring->napi); bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring); bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring); bcm4908_enet_dma_uninit(enet); + free_irq(enet->irq_tx, enet); free_irq(enet->netdev->irq, enet); return 0; @@ -484,25 +518,19 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde u32 tmp; /* Free transmitted skbs */ - while (ring->read_idx != ring->write_idx) { - buf_desc = &ring->buf_desc[ring->read_idx]; - if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN) - break; - slot = &ring->slots[ring->read_idx]; - - dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE); - dev_kfree_skb(slot->skb); - if (++ring->read_idx == ring->length) - ring->read_idx = 0; - } + if (enet->irq_tx < 0 && + !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN)) + napi_schedule(&enet->tx_ring.napi); /* Don't use the last empty buf descriptor */ if (ring->read_idx <= ring->write_idx) free_buf_descs = ring->read_idx - ring->write_idx + ring->length; else free_buf_descs = ring->read_idx - ring->write_idx; - if (free_buf_descs < 2) + if (free_buf_descs < 2) { + netif_stop_queue(netdev); return NETDEV_TX_BUSY; + } /* Hardware removes OWN bit after sending data */ buf_desc = &ring->buf_desc[ring->write_idx]; @@ -539,9 +567,10 @@ static int bcm4908_enet_start_xmit(struct 
sk_buff *skb, struct net_device *netde return NETDEV_TX_OK; } -static int bcm4908_enet_poll(struct napi_struct *napi, int weight) +static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight) { - struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi); + struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi); + struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring); struct device *dev = enet->dev; int handled = 0; @@ -590,7 +619,7 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) if (handled < weight) { napi_complete_done(napi, handled); - bcm4908_enet_intrs_on(enet); + bcm4908_enet_dma_ring_intrs_on(enet, rx_ring); } /* Hardware could disable ring if it run out of descriptors */ @@ -599,6 +628,42 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) return handled; } +static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) +{ + struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi); + struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring); + struct bcm4908_enet_dma_ring_bd *buf_desc; + struct bcm4908_enet_dma_ring_slot *slot; + struct device *dev = enet->dev; + unsigned int bytes = 0; + int handled = 0; + + while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) { + buf_desc = &tx_ring->buf_desc[tx_ring->read_idx]; + if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN) + break; + slot = &tx_ring->slots[tx_ring->read_idx]; + + dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE); + dev_kfree_skb(slot->skb); + bytes += slot->len; + if (++tx_ring->read_idx == tx_ring->length) + tx_ring->read_idx = 0; + + handled++; + } + + if (handled < weight) { + napi_complete_done(napi, handled); + bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); + } + + if (netif_queue_stopped(enet->netdev)) + netif_wake_queue(enet->netdev); + + return handled; +} + static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu) { struct bcm4908_enet *enet = netdev_priv(netdev); @@ -641,6 +706,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev) if (netdev->irq < 0) return netdev->irq; + enet->irq_tx = platform_get_irq_byname(pdev, "tx"); + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); err = bcm4908_enet_dma_alloc(enet); @@ -648,12 +715,15 @@ static int bcm4908_enet_probe(struct platform_device *pdev) return err; SET_NETDEV_DEV(netdev, &pdev->dev); - eth_hw_addr_random(netdev); + err = of_get_mac_address(dev->of_node, netdev->dev_addr); + if (err) + eth_hw_addr_random(netdev); netdev->netdev_ops = &bcm4908_enet_netdev_ops; netdev->min_mtu = ETH_ZLEN; netdev->mtu = ETH_DATA_LEN; netdev->max_mtu = ENET_MTU_MAX; - netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64); + netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT); err = register_netdev(netdev); if (err) { @@ -671,7 +741,8 @@ static int bcm4908_enet_remove(struct platform_device *pdev) struct bcm4908_enet *enet = platform_get_drvdata(pdev); unregister_netdev(enet->netdev); - netif_napi_del(&enet->napi); + netif_napi_del(&enet->rx_ring.napi); + netif_napi_del(&enet->tx_ring.napi); bcm4908_enet_dma_free(enet); return 0; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 777bbf6d2586..d9f0f0df8f7b 100644 --- 
a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2457,7 +2457,6 @@ static int bcm_sysport_probe(struct platform_device *pdev) struct bcm_sysport_priv *priv; struct device_node *dn; struct net_device *dev; - const void *macaddr; u32 txq, rxq; int ret; @@ -2552,12 +2551,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) } /* Initialize netdevice members */ - macaddr = of_get_mac_address(dn); - if (IS_ERR(macaddr)) { + ret = of_get_mac_address(dn, dev->dev_addr); + if (ret) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); eth_hw_addr_random(dev); - } else { - ether_addr_copy(dev->dev_addr, macaddr); } SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index a5fd161ab5ee..85fa0ab7201c 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -115,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core) struct ssb_sprom *sprom = &core->bus->sprom; struct mii_bus *mii_bus; struct bgmac *bgmac; - const u8 *mac = NULL; + const u8 *mac; int err; bgmac = bgmac_alloc(&core->dev); @@ -128,11 +128,10 @@ static int bgmac_probe(struct bcma_device *core) bcma_set_drvdata(core, bgmac); - if (bgmac->dev->of_node) - mac = of_get_mac_address(bgmac->dev->of_node); + err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); /* If no MAC address assigned via device tree, check SPROM */ - if (IS_ERR_OR_NULL(mac)) { + if (err) { switch (core->core_unit) { case 0: mac = sprom->et0mac; @@ -149,10 +148,9 @@ static int bgmac_probe(struct bcma_device *core) err = -ENOTSUPP; goto err; } + ether_addr_copy(bgmac->net_dev->dev_addr, mac); } - ether_addr_copy(bgmac->net_dev->dev_addr, mac); - /* On BCM4706 we need common core to access PHY */ if (core->id.id == BCMA_CORE_4706_MAC_GBIT && !core->bus->drv_gmac_cmn.core) { diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index f37f1c58f368..9834b77cf4b6 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -173,7 +173,7 @@ static int bgmac_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct bgmac *bgmac; struct resource *regs; - const u8 *mac_addr; + int ret; bgmac = bgmac_alloc(&pdev->dev); if (!bgmac) @@ -192,11 +192,10 @@ static int bgmac_probe(struct platform_device *pdev) bgmac->dev = &pdev->dev; bgmac->dma_dev = &pdev->dev; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr); - else - dev_warn(&pdev->dev, "MAC address not present in device tree\n"); + ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); + if (ret) + dev_warn(&pdev->dev, + "MAC address not present in device tree\n"); bgmac->irq = platform_get_irq(pdev, 0); if (bgmac->irq < 0) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b652ed72a621..56801387591d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1395,7 +1395,6 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) u32 op_gen_command = 0; u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); - int ret = 0; if (REG_RD(bp, comp_addr)) { BNX2X_ERR("Cleanup complete was not 0 before sending\n"); @@ -1420,7 +1419,7 @@ int 
bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) /* Zero completion for next FLR */ REG_WR(bp, comp_addr, 0); - return ret; + return 0; } u8 bnx2x_is_pcie_pending(struct pci_dev *dev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b53a0d87371a..39ac9e2f5118 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -122,7 +122,10 @@ enum board_idx { NETXTREME_E_VF, NETXTREME_C_VF, NETXTREME_S_VF, + NETXTREME_C_VF_HV, + NETXTREME_E_VF_HV, NETXTREME_E_P5_VF, + NETXTREME_E_P5_VF_HV, }; /* indexed by enum above */ @@ -170,7 +173,10 @@ static const struct { [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, + [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, + [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, + [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, + { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } @@ -265,7 +281,8 @@ static struct workqueue_struct *bnxt_pf_wq; static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || - idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); + idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || + idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -358,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) struct pci_dev *pdev = bp->pdev; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; + __le32 lflags = 0; i = 
skb_get_queue_mapping(skb); if (unlikely(i >= bp->tx_nr_rings)) { @@ -399,6 +417,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; } + if (unlikely(skb->no_fcs)) { + lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); + goto normal_tx; + } + if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { struct tx_push_buffer *tx_push_buf = txr->tx_push; struct tx_push_bd *tx_push = &tx_push_buf->push_bd; @@ -500,7 +523,7 @@ normal_tx: txbd1 = (struct tx_bd_ext *) &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; - txbd1->tx_bd_hsize_lflags = 0; + txbd1->tx_bd_hsize_lflags = lflags; if (skb_is_gso(skb)) { u32 hdr_len; @@ -512,14 +535,14 @@ normal_tx: hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | + txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | TX_BD_FLAGS_T_IPID | (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); length = skb_shinfo(skb)->gso_size; txbd1->tx_bd_mss = cpu_to_le32(length); length += hdr_len; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - txbd1->tx_bd_hsize_lflags = + txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); txbd1->tx_bd_mss = 0; } @@ -1732,14 +1755,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cons = rxcmp->rx_cmp_opaque; if (unlikely(cons != rxr->rx_next_cons)) { - int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); + int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); /* 0xffff is forced error, don't print it */ if (rxr->rx_next_cons != 0xffff) netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); - return rc1; + if (rc1) + return rc1; + goto next_rx_no_prod_no_len; } rx_buf = &rxr->rx_buf_ring[cons]; data = rx_buf->data; @@ -4145,7 +4170,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { bnxt_free_ring_stats(bp); - if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) || + if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_free_port_stats(bp); bnxt_free_ring_grps(bp); @@ -4470,7 +4495,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, writel(1, bp->bar0 + doorbell_offset); if (!pci_is_enabled(bp->pdev)) - return 0; + return -ENODEV; if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; @@ -4500,12 +4525,15 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) return -EBUSY; /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); - else + } else { + if (HWRM_WAIT_MUST_ABORT(bp, req)) + break; usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT); + } } if (bp->hwrm_intr_seq_id != (u16)~seq_id) { @@ -4530,15 +4558,19 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (len) break; /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); - else + } else { + if (HWRM_WAIT_MUST_ABORT(bp, req)) + goto timeout_abort; usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT); + } } if (i >= tmo_count) { +timeout_abort: if (!silent) netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", HWRM_TOTAL_TIMEOUT(i), 
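In the bnxt_start_xmit() hunk above, the later writes to tx_bd_hsize_lflags change from '=' to '|=' because the field is now seeded with any NO_CRC flag latched for skb->no_fcs earlier in the function; a plain assignment in the LSO/checksum branches would silently drop it. Illustrative sketch only:

	__le32 lflags = 0;

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	/* seed the descriptor with whatever was latched above ... */
	txbd1->tx_bd_hsize_lflags = lflags;
	/* ... and only OR in the offload flags so NO_CRC survives */
	if (skb_is_gso(skb))
		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO);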
@@ -7540,6 +7572,32 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) BNXT_FW_HEALTH_WIN_MAP_OFF); } +bool bnxt_is_fw_healthy(struct bnxt *bp) +{ + if (bp->fw_health && bp->fw_health->status_reliable) { + u32 fw_status; + + fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status)) + return false; + } + + return true; +} + +static void bnxt_inv_fw_health_reg(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg_type; + + if (!fw_health || !fw_health->status_reliable) + return; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) + fw_health->status_reliable = false; +} + static void bnxt_try_map_fw_health_reg(struct bnxt *bp) { void __iomem *hs; @@ -7547,6 +7605,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp) u32 reg_type; u32 sig; + if (bp->fw_health) + bp->fw_health->status_reliable = false; + __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); @@ -7558,11 +7619,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp) BNXT_FW_HEALTH_WIN_BASE + BNXT_GRC_REG_CHIP_NUM); } - if (!BNXT_CHIP_P5(bp)) { - if (bp->fw_health) - bp->fw_health->status_reliable = false; + if (!BNXT_CHIP_P5(bp)) return; - } + status_loc = BNXT_GRC_REG_STATUS_P5 | BNXT_FW_HEALTH_REG_TYPE_BAR0; } else { @@ -7592,6 +7651,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) u32 reg_base = 0xffffffff; int i; + bp->fw_health->status_reliable = false; /* Only pre-map the monitoring GRC registers using window 3 */ for (i = 0; i < 4; i++) { u32 reg = fw_health->regs[i]; @@ -7604,6 +7664,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) return -ERANGE; fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); } + bp->fw_health->status_reliable = true; if (reg_base == 0xffffffff) return 0; @@ -8304,11 +8365,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) #endif } -/* Allow PF and VF with default VLAN to be in promiscuous mode */ +/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ static bool bnxt_promisc_ok(struct bnxt *bp) { #ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp) && !bp->vf.vlan) + if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) return false; #endif return true; @@ -8405,7 +8466,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (bp->dev->flags & IFF_BROADCAST) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; - if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) + if (bp->dev->flags & IFF_PROMISC) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; if (bp->dev->flags & IFF_ALLMULTI) { @@ -9039,8 +9100,9 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info) static void bnxt_report_link(struct bnxt *bp) { if (bp->link_info.link_up) { - const char *duplex; + const char *signal = ""; const char *flow_ctrl; + const char *duplex; u32 speed; u16 fec; @@ -9062,9 +9124,24 @@ static void bnxt_report_link(struct bnxt *bp) flow_ctrl = "ON - receive"; else flow_ctrl = "none"; - netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", - speed, duplex, flow_ctrl); - if (bp->flags & BNXT_FLAG_EEE_CAP) + if (bp->link_info.phy_qcfg_resp.option_flags & + PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { + u8 sig_mode = bp->link_info.active_fec_sig_mode & + PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; + switch (sig_mode) { + case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: + 
signal = "(NRZ) "; + break; + case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: + signal = "(PAM4) "; + break; + default: + break; + } + } + netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", + speed, signal, duplex, flow_ctrl); + if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) netdev_info(bp->dev, "EEE is %s\n", bp->eee.eee_active ? "active" : "not active"); @@ -9096,10 +9173,6 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; - bp->flags &= ~BNXT_FLAG_EEE_CAP; - if (bp->test_info) - bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | - BNXT_TEST_FL_AN_PHY_LPBK); if (bp->hwrm_spec_code < 0x10201) return 0; @@ -9110,31 +9183,17 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (rc) goto hwrm_phy_qcaps_exit; + bp->phy_flags = resp->flags; if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); - bp->flags |= BNXT_FLAG_EEE_CAP; eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; } - if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { - if (bp->test_info) - bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; - } - if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { - if (bp->test_info) - bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; - } - if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { - if (BNXT_PF(bp)) - bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; - } - if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) - bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; if (bp->hwrm_spec_code >= 0x10a01) { if (bnxt_phy_qcaps_no_speed(resp)) { @@ -9225,7 +9284,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; link_info->module_status = resp->module_status; - if (bp->flags & BNXT_FLAG_EEE_CAP) { + if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds; @@ -9461,7 +9520,8 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) if (!BNXT_SINGLE_PF(bp)) return 0; - if (pci_num_vf(bp->pdev)) + if (pci_num_vf(bp->pdev) && + !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); @@ -9494,9 +9554,10 @@ static int bnxt_try_recover_fw(struct bnxt *bp) mutex_lock(&bp->hwrm_cmd_lock); do { - rc = __bnxt_hwrm_ver_get(bp, true); sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); - if (!sts || !BNXT_FW_IS_BOOTING(sts)) + rc = __bnxt_hwrm_ver_get(bp, true); + if (!BNXT_FW_IS_BOOTING(sts) && + !BNXT_FW_IS_RECOVERING(sts)) break; retry++; } while (rc == -EBUSY && retry < BNXT_FW_RETRY); @@ -9556,13 +9617,17 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (rc) return rc; - if (!up) + if (!up) { + bnxt_inv_fw_health_reg(bp); return 0; + } if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) resc_reinit = true; if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) fw_reset = true; + else if (bp->fw_health && !bp->fw_health->status_reliable) + bnxt_try_map_fw_health_reg(bp); if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); @@ -9571,6 +9636,7 @@ static int 
bnxt_hwrm_if_change(struct bnxt *bp, bool up) } if (resc_reinit || fw_reset) { if (fw_reset) { + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_stop(bp); bnxt_free_ctx_mem(bp); @@ -9579,21 +9645,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) bnxt_dcb_free(bp); rc = bnxt_fw_init_one(bp); if (rc) { + clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return rc; } bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); if (rc) { + clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); netdev_err(bp->dev, "init int mode failed\n"); return rc; } - set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); } if (BNXT_NEW_RM(bp)) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; rc = bnxt_hwrm_func_resc_qcaps(bp, true); + if (rc) + netdev_err(bp->dev, "resc_qcaps failed\n"); + hw_resc->resv_cp_rings = 0; hw_resc->resv_stat_ctxs = 0; hw_resc->resv_irqs = 0; @@ -9607,7 +9677,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) } } } - return 0; + return rc; } static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) @@ -9736,7 +9806,9 @@ static ssize_t bnxt_show_temp(struct device *dev, if (!rc) len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - return rc ?: len; + if (rc) + return rc; + return len; } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); @@ -9793,7 +9865,7 @@ static bool bnxt_eee_config_ok(struct bnxt *bp) struct ethtool_eee *eee = &bp->eee; struct bnxt_link_info *link_info = &bp->link_info; - if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) return true; if (eee->eee_enabled) { @@ -10440,7 +10512,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); - if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) + if (dev->flags & IFF_PROMISC) mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; uc_update = bnxt_uc_list_updated(bp); @@ -10516,6 +10588,9 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) } skip_uc: + if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && + !bnxt_promisc_ok(bp)) + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); if (rc && vnic->mc_list_count) { netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", @@ -10710,6 +10785,40 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } +static netdev_features_t bnxt_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct bnxt *bp; + __be16 udp_port; + u8 l4_proto = 0; + + features = vlan_features_check(skb, features); + if (!skb->encapsulation) + return features; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + if (l4_proto != IPPROTO_UDP) + return features; + + bp = netdev_priv(dev); + /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. 
*/ + udp_port = udp_hdr(skb)->dest; + if (udp_port == bp->vxlan_port || udp_port == bp->nge_port) + return features; + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf) { @@ -11035,6 +11144,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp) pci_disable_device(bp->pdev); } __bnxt_close_nic(bp, true, false); + bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); if (pci_is_enabled(bp->pdev)) @@ -11640,7 +11750,7 @@ static void bnxt_reset_all(struct bnxt *bp) req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) + if (rc != -ENODEV) netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); } bp->fw_reset_timestamp = jiffies; @@ -11723,28 +11833,20 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; case BNXT_FW_RESET_STATE_ENABLE_DEV: - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { - u32 val; - - if (!bp->fw_reset_min_dsecs) { - u16 val; - - pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, - &val); - if (val == 0xffff) { - if (bnxt_fw_reset_timeout(bp)) { - netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); - goto fw_reset_abort; - } - bnxt_queue_fw_reset_work(bp, HZ / 1000); - return; + bnxt_inv_fw_health_reg(bp); + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && + !bp->fw_reset_min_dsecs) { + u16 val; + + pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); + if (val == 0xffff) { + if (bnxt_fw_reset_timeout(bp)) { + netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + goto fw_reset_abort; } + bnxt_queue_fw_reset_work(bp, HZ / 1000); + return; } - val = bnxt_fw_health_readl(bp, - BNXT_FW_RESET_INPROG_REG); - if (val) - netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", - val); } clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); if (pci_enable_device(bp->pdev)) { @@ -11787,6 +11889,8 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_ulp_start(bp, rc); if (!rc) bnxt_reenable_sriov(bp); + bnxt_vf_reps_alloc(bp); + bnxt_vf_reps_open(bp); bnxt_dl_health_recovery_done(bp); bnxt_dl_health_status_update(bp, true); rtnl_unlock(); @@ -12222,10 +12326,13 @@ static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) unsigned int cmd; udp_tunnel_nic_get_port(netdev, table, 0, &ti); - if (ti.type == UDP_TUNNEL_TYPE_VXLAN) + if (ti.type == UDP_TUNNEL_TYPE_VXLAN) { + bp->vxlan_port = ti.port; cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; - else + } else { + bp->nge_port = ti.port; cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; + } if (ti.port) return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); @@ -12325,6 +12432,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_change_mtu = bnxt_change_mtu, .ndo_fix_features = bnxt_fix_features, .ndo_set_features = bnxt_set_features, + .ndo_features_check = bnxt_features_check, .ndo_tx_timeout = bnxt_tx_timeout, #ifdef CONFIG_BNXT_SRIOV .ndo_get_vf_config = bnxt_get_vf_config, @@ -12393,12 +12501,17 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; + bp->phy_flags = 0; rc = bnxt_hwrm_phy_qcaps(bp); if (rc) { netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", rc); return rc; } + if (bp->phy_flags & 
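bnxt_features_check() above works together with the bnxt_udp_tunnel_sync() change later in this file, which records the offloaded VXLAN/Geneve destination ports in bp->vxlan_port and bp->nge_port. A rough sketch of the effect for a tunnelled skb (the helper name below is hypothetical, not from the driver):

	netdev_features_t features = dev->features;

	/* encapsulated UDP packet whose outer destination port matches
	 * neither programmed tunnel port: clear the checksum/GSO bits so
	 * the stack falls back to software offload for this skb */
	if (skb->encapsulation && !bnxt_tunnel_port_offloaded(bp, skb))
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);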
BNXT_PHY_FL_NO_FCS) + bp->dev->priv_flags |= IFF_SUPP_NOFCS; + else + bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; if (!fw_dflt) return 0; @@ -12934,6 +13047,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc); } + bnxt_inv_fw_health_reg(bp); bnxt_dl_register(bp); rc = register_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1259e68cba2a..24d2ad6a8740 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -671,6 +671,10 @@ struct nqe_cn { #define HWRM_MIN_TIMEOUT 25 #define HWRM_MAX_TIMEOUT 40 +#define HWRM_WAIT_MUST_ABORT(bp, req) \ + (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \ + !bnxt_is_fw_healthy(bp)) + #define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \ ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ @@ -1337,9 +1341,6 @@ struct bnxt_led_info { struct bnxt_test_info { u8 offline_mask; - u8 flags; -#define BNXT_TEST_FL_EXT_LPBK 0x1 -#define BNXT_TEST_FL_AN_PHY_LPBK 0x2 u16 timeout; char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; }; @@ -1560,6 +1561,7 @@ struct bnxt_fw_reporter_ctx { #define BNXT_FW_STATUS_HEALTH_MSK 0xffff #define BNXT_FW_STATUS_HEALTHY 0x8000 #define BNXT_FW_STATUS_SHUTDOWN 0x100000 +#define BNXT_FW_STATUS_RECOVERING 0x400000 #define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\ BNXT_FW_STATUS_HEALTHY) @@ -1570,6 +1572,9 @@ struct bnxt_fw_reporter_ctx { #define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \ BNXT_FW_STATUS_HEALTHY) +#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \ + ((sts) & BNXT_FW_STATUS_RECOVERING)) + #define BNXT_FW_RETRY 5 #define BNXT_FW_IF_RETRY 10 @@ -1685,7 +1690,6 @@ struct bnxt { #define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_UDP_RSS_CAP 0x800 - #define BNXT_FLAG_EEE_CAP 0x1000 #define BNXT_FLAG_NEW_RSS_CAP 0x2000 #define BNXT_FLAG_WOL_CAP 0x4000 #define BNXT_FLAG_ROCEV1_CAP 0x8000 @@ -1712,8 +1716,10 @@ struct bnxt { #define BNXT_NPAR(bp) ((bp)->port_partition_type) #define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST) #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) +#define BNXT_SH_PORT_CFG_OK(bp) (BNXT_PF(bp) && \ + ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG)) #define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) || \ - ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) && \ + BNXT_SH_PORT_CFG_OK(bp)) && \ (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) @@ -1863,11 +1869,9 @@ struct bnxt { #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 #define BNXT_FW_CAP_HOT_RESET 0x00200000 - #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000 #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 - #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000 #define BNXT_FW_CAP_RING_MONITOR 0x40000000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) @@ -1910,6 +1914,8 @@ struct bnxt { u16 vxlan_fw_dst_port_id; u16 nge_fw_dst_port_id; + __be16 vxlan_port; + __be16 nge_port; u8 port_partition_type; u8 port_count; u16 br_mode; @@ -2002,6 +2008,17 @@ struct bnxt { u32 lpi_tmr_lo; u32 lpi_tmr_hi; + /* copied from flags in 
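A worked example of the firmware-status macros added above, using an invented register value; these checks feed HWRM_WAIT_MUST_ABORT() and bnxt_try_recover_fw() in the bnxt.c hunks earlier in this diff:

	u32 sts = 0x00408001;		/* hypothetical FW status register value */

	BNXT_FW_IS_HEALTHY(sts);	/* false: (sts & 0xffff) == 0x8001, not 0x8000 */
	BNXT_FW_IS_ERR(sts);		/* true:  0x8001 > 0x8000 */
	BNXT_FW_IS_RECOVERING(sts);	/* true:  error state and bit 0x400000 set,
					 * so the recovery loop keeps polling */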
hwrm_port_phy_qcaps_output */ + u8 phy_flags; +#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED +#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED +#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED +#define BNXT_PHY_FL_SHARED_PORT_CFG PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED +#define BNXT_PHY_FL_PORT_STATS_NO_RESET PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET +#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED +#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN +#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS + u8 num_tests; struct bnxt_test_info *test_info; @@ -2228,6 +2245,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); +bool bnxt_is_fw_healthy(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2f8b193a772d..c664ec52ebcf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1930,6 +1930,20 @@ static int bnxt_get_fecparam(struct net_device *dev, return 0; } +static void bnxt_get_fec_stats(struct net_device *dev, + struct ethtool_fec_stats *fec_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + fec_stats->corrected_bits.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); +} + static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, u32 fec) { @@ -2898,7 +2912,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) if (!BNXT_PHY_CFG_ABLE(bp)) return -EOPNOTSUPP; - if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) return -EOPNOTSUPP; mutex_lock(&bp->link_lock); @@ -2949,7 +2963,7 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) { struct bnxt *bp = netdev_priv(dev); - if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) return -EOPNOTSUPP; *edata = bp->eee; @@ -3201,7 +3215,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, int rc; if (!link_info->autoneg || - (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK)) + (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) return 0; rc = bnxt_query_force_speeds(bp, &fw_advertising); @@ -3402,7 +3416,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && - (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK)) + (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) do_ext_lpbk = true; if (etest->flags & ETH_TEST_FL_OFFLINE) { @@ -3976,6 +3990,133 @@ ethtool_init_exit: mutex_unlock(&bp->hwrm_cmd_lock); } +static void bnxt_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + phy_stats->SymbolErrorDuringCarrier = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); +} + +static void bnxt_get_eth_mac_stats(struct 
net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + mac_stats->FramesReceivedOK = + BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); + mac_stats->FramesTransmittedOK = + BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); + mac_stats->FrameCheckSequenceErrors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + mac_stats->AlignmentErrors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + mac_stats->OutOfRangeLengthField = + BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); +} + +static void bnxt_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + ctrl_stats->MACControlFramesReceived = + BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); +} + +static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 9216 }, + { 9217, 16383 }, + {} +}; + +static void bnxt_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + rmon_stats->jabbers = + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + rmon_stats->oversize_pkts = + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); + rmon_stats->undersize_pkts = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); + + rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); + rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); + rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); + rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); + rmon_stats->hist[4] = + BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); + rmon_stats->hist[5] = + BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); + rmon_stats->hist[6] = + BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); + rmon_stats->hist[7] = + BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); + rmon_stats->hist[8] = + BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); + rmon_stats->hist[9] = + BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); + + rmon_stats->hist_tx[0] = + BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); + rmon_stats->hist_tx[1] = + BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); + rmon_stats->hist_tx[2] = + BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); + rmon_stats->hist_tx[3] = + BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); + rmon_stats->hist_tx[4] = + BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); + rmon_stats->hist_tx[5] = + BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); + rmon_stats->hist_tx[6] = + BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); + rmon_stats->hist_tx[7] = + BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); + rmon_stats->hist_tx[8] = + BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); + rmon_stats->hist_tx[9] = + BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); + + *ranges = bnxt_rmon_ranges; +} + void 
bnxt_ethtool_free(struct bnxt *bp) { kfree(bp->test_info); @@ -3991,6 +4132,7 @@ const struct ethtool_ops bnxt_ethtool_ops = { ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, + .get_fec_stats = bnxt_get_fec_stats, .get_fecparam = bnxt_get_fecparam, .set_fecparam = bnxt_set_fecparam, .get_pause_stats = bnxt_get_pause_stats, @@ -4034,4 +4176,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_dump = bnxt_set_dump, .get_dump_flag = bnxt_get_dump_flag, .get_dump_data = bnxt_get_dump_data, + .get_eth_phy_stats = bnxt_get_eth_phy_stats, + .get_eth_mac_stats = bnxt_get_eth_mac_stats, + .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, + .get_rmon_stats = bnxt_get_rmon_stats, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index a217316228f4..eb00a219aa51 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -49,10 +49,6 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) { - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - netdev_err(bp->dev, "vf ndo called though PF is down\n"); - return -EINVAL; - } if (!bp->pf.active_vfs) { netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); return -EINVAL; @@ -113,7 +109,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) { @@ -125,9 +121,9 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) return 0; } -static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) { - if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) return !!(vf->flags & BNXT_VF_TRUST); bnxt_hwrm_func_qcfg_flags(bp, vf); @@ -1120,10 +1116,38 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) } } +int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) +{ + struct hwrm_func_vf_cfg_input req = {0}; + int rc = 0; + + if (!BNXT_VF(bp)) + return 0; + + if (bp->hwrm_spec_code < 0x10202) { + if (is_valid_ether_addr(bp->vf.mac_addr)) + rc = -EADDRNOTAVAIL; + goto mac_done; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +mac_done: + if (rc && strict) { + rc = -EADDRNOTAVAIL; + netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", + mac); + return rc; + } + return 0; +} + void bnxt_update_vf_mac(struct bnxt *bp) { struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + bool inform_pf = false; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); req.fid = cpu_to_le16(0xffff); @@ -1139,42 +1163,24 @@ void bnxt_update_vf_mac(struct bnxt *bp) * default but the stored zero MAC will allow the VF user to change * the random MAC address using ndo_set_mac_address() if he wants. 
*/ - if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) + if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) { memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); + /* This means we are now using our own MAC address, let + * the PF know about this MAC address. + */ + if (!is_valid_ether_addr(bp->vf.mac_addr)) + inform_pf = true; + } /* overwrite netdev dev_addr with admin VF MAC */ if (is_valid_ether_addr(bp->vf.mac_addr)) memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); + if (inform_pf) + bnxt_approve_mac(bp, bp->dev->dev_addr, false); } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) -{ - struct hwrm_func_vf_cfg_input req = {0}; - int rc = 0; - - if (!BNXT_VF(bp)) - return 0; - - if (bp->hwrm_spec_code < 0x10202) { - if (is_valid_ether_addr(bp->vf.mac_addr)) - rc = -EADDRNOTAVAIL; - goto mac_done; - } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -mac_done: - if (rc && strict) { - rc = -EADDRNOTAVAIL; - netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", - mac); - return rc; - } - return 0; -} #else int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 629641bf6fc5..995535e4c11b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -34,6 +34,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); int bnxt_set_vf_bw(struct net_device *, int, int, int); int bnxt_set_vf_link_state(struct net_device *, int, int); int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf); int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 4b5c8fd76a51..dd66302343a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -284,8 +284,26 @@ void bnxt_vf_reps_open(struct bnxt *bp) if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) return; - for (i = 0; i < pci_num_vf(bp->pdev); i++) - bnxt_vf_rep_open(bp->vf_reps[i]->dev); + for (i = 0; i < pci_num_vf(bp->pdev); i++) { + /* Open the VF-Rep only if it is allocated in the FW */ + if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID) + bnxt_vf_rep_open(bp->vf_reps[i]->dev); + } +} + +static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep) +{ + if (!vf_rep) + return; + + if (vf_rep->dst) { + dst_release((struct dst_entry *)vf_rep->dst); + vf_rep->dst = NULL; + } + if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) { + hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + } } static void __bnxt_vf_reps_destroy(struct bnxt *bp) @@ -297,11 +315,7 @@ static void __bnxt_vf_reps_destroy(struct bnxt *bp) for (i = 0; i < num_vfs; i++) { vf_rep = bp->vf_reps[i]; if (vf_rep) { - dst_release((struct dst_entry *)vf_rep->dst); - - if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) - hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); - + 
__bnxt_free_one_vf_rep(bp, vf_rep); if (vf_rep->dev) { /* if register_netdev failed, then netdev_ops * would have been set to NULL @@ -350,6 +364,80 @@ void bnxt_vf_reps_destroy(struct bnxt *bp) __bnxt_vf_reps_destroy(bp); } +/* Free the VF-Reps in firmware, during firmware hot-reset processing. + * Note that the VF-Rep netdevs are still active (not unregistered) during + * this process. As the mode transition from SWITCHDEV to LEGACY happens + * under the rtnl_lock() this routine is safe under the rtnl_lock(). + */ +void bnxt_vf_reps_free(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + int i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + for (i = 0; i < num_vfs; i++) + __bnxt_free_one_vf_rep(bp, bp->vf_reps[i]); +} + +static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + u16 *cfa_code_map) +{ + /* get cfa handles from FW */ + if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action, + &vf_rep->rx_cfa_code)) + return -ENOLINK; + + cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); + if (!vf_rep->dst) + return -ENOMEM; + + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + + return 0; +} + +/* Allocate the VF-Reps in firmware, during firmware hot-reset processing. + * Note that the VF-Rep netdevs are still active (not unregistered) during + * this process. As the mode transition from SWITCHDEV to LEGACY happens + * under the rtnl_lock() this routine is safe under the rtnl_lock(). + */ +int bnxt_vf_reps_alloc(struct bnxt *bp) +{ + u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int rc, i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return 0; + + if (!cfa_code_map) + return -EINVAL; + + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + vf_rep->vf_idx = i; + + rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); + if (rc) + goto err; + } + + return 0; + +err: + netdev_info(bp->dev, "%s error=%d\n", __func__, rc); + bnxt_vf_reps_free(bp); + return rc; +} + /* Use the OUI of the PF's perm addr and report the same mac addr * for the same VF-rep each time */ @@ -428,25 +516,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp) vf_rep->vf_idx = i; vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; - /* get cfa handles from FW */ - rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, - &vf_rep->tx_cfa_action, - &vf_rep->rx_cfa_code); - if (rc) { - rc = -ENOLINK; + rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); + if (rc) goto err; - } - cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; - - vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, - GFP_KERNEL); - if (!vf_rep->dst) { - rc = -ENOMEM; - goto err; - } - /* only cfa_action is needed to mux a packet while TXing */ - vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; - vf_rep->dst->u.port_info.lower_dev = bp->dev; bnxt_vf_rep_netdev_init(bp, vf_rep, dev); rc = register_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index d7287651422f..5637a84884d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -19,6 +19,8 @@ void bnxt_vf_reps_close(struct bnxt *bp); void bnxt_vf_reps_open(struct bnxt *bp); void 
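Putting the VF-representor pieces above together: in switchdev mode the firmware-reset path in bnxt.c (earlier in this diff) now tears down and re-creates only the firmware state behind the representors while their netdevs stay registered. Rough call order, as a sketch rather than verbatim code:

	bnxt_fw_reset_close(bp);
		/* -> bnxt_vf_reps_free(): drop dst + CFA handles, keep netdevs */

	/* ... firmware resets and the PF re-initialises ... */

	bnxt_vf_reps_alloc(bp);	/* re-query tx_cfa_action/rx_cfa_code from FW */
	bnxt_vf_reps_open(bp);	/* only opens reps whose CFA handle is valid */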
bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); +int bnxt_vf_reps_alloc(struct bnxt *bp); +void bnxt_vf_reps_free(struct bnxt *bp); static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) { @@ -61,5 +63,15 @@ static inline bool bnxt_dev_is_vf_rep(struct net_device *dev) { return false; } + +static inline int bnxt_vf_reps_alloc(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_vf_reps_free(struct bnxt *bp) +{ +} + #endif /* CONFIG_BNXT_SRIOV */ #endif /* BNXT_VFR_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 641303894341..ec9564e584e0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -217,7 +217,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct pci_dev *pdev = bp->pdev; struct bnxt_tx_ring_info *txr; dma_addr_t mapping; - int drops = 0; + int nxmit = 0; int ring; int i; @@ -233,21 +233,17 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame *xdp = frames[i]; if (!txr || !bnxt_tx_avail(bp, txr) || - !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) { - xdp_return_frame_rx_napi(xdp); - drops++; - continue; - } + !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) + break; mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, mapping)) { - xdp_return_frame_rx_napi(xdp); - drops++; - continue; - } + if (dma_mapping_error(&pdev->dev, mapping)) + break; + __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); + nxmit++; } if (flags & XDP_XMIT_FLUSH) { @@ -256,7 +252,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); } - return num_frames - drops; + return nxmit; } /* Under rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 1c86eddb1b51..facde824bcaa 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -18,7 +18,6 @@ #include <linux/delay.h> #include <linux/pm.h> #include <linux/clk.h> -#include <linux/version.h> #include <linux/platform_device.h> #include <net/arp.h> diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 588c4804d10a..265c2fa6bbe0 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -524,6 +524,68 @@ bnad_set_pauseparam(struct net_device *netdev, return 0; } +static void bnad_get_txf_strings(u8 **string, int f_num) +{ + ethtool_sprintf(string, "txf%d_ucast_octets", f_num); + ethtool_sprintf(string, "txf%d_ucast", f_num); + ethtool_sprintf(string, "txf%d_ucast_vlan", f_num); + ethtool_sprintf(string, "txf%d_mcast_octets", f_num); + ethtool_sprintf(string, "txf%d_mcast", f_num); + ethtool_sprintf(string, "txf%d_mcast_vlan", f_num); + ethtool_sprintf(string, "txf%d_bcast_octets", f_num); + ethtool_sprintf(string, "txf%d_bcast", f_num); + ethtool_sprintf(string, "txf%d_bcast_vlan", f_num); + ethtool_sprintf(string, "txf%d_errors", f_num); + ethtool_sprintf(string, "txf%d_filter_vlan", f_num); + ethtool_sprintf(string, "txf%d_filter_mac_sa", f_num); +} + +static void bnad_get_rxf_strings(u8 **string, int f_num) +{ + ethtool_sprintf(string, "rxf%d_ucast_octets", f_num); + ethtool_sprintf(string, "rxf%d_ucast", f_num); + 
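The bnxt_xdp_xmit() rework above follows the tree-wide change to ndo_xdp_xmit semantics in this cycle: the driver returns how many frames it actually queued and stops at the first failure instead of freeing failed frames itself. A hedged sketch of the caller-side contract this implies (the core's real code differs in detail):

	int i, sent;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);
	if (sent < 0)
		sent = 0;
	/* frames the driver did not consume are now the caller's to free */
	for (i = sent; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);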
ethtool_sprintf(string, "rxf%d_ucast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_mcast_octets", f_num); + ethtool_sprintf(string, "rxf%d_mcast", f_num); + ethtool_sprintf(string, "rxf%d_mcast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_bcast_octets", f_num); + ethtool_sprintf(string, "rxf%d_bcast", f_num); + ethtool_sprintf(string, "rxf%d_bcast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_frame_drops", f_num); +} + +static void bnad_get_cq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "cq%d_producer_index", q_num); + ethtool_sprintf(string, "cq%d_consumer_index", q_num); + ethtool_sprintf(string, "cq%d_hw_producer_index", q_num); + ethtool_sprintf(string, "cq%d_intr", q_num); + ethtool_sprintf(string, "cq%d_poll", q_num); + ethtool_sprintf(string, "cq%d_schedule", q_num); + ethtool_sprintf(string, "cq%d_keep_poll", q_num); + ethtool_sprintf(string, "cq%d_complete", q_num); +} + +static void bnad_get_rxq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "rxq%d_packets", q_num); + ethtool_sprintf(string, "rxq%d_bytes", q_num); + ethtool_sprintf(string, "rxq%d_packets_with_error", q_num); + ethtool_sprintf(string, "rxq%d_allocbuf_failed", q_num); + ethtool_sprintf(string, "rxq%d_mapbuf_failed", q_num); + ethtool_sprintf(string, "rxq%d_producer_index", q_num); + ethtool_sprintf(string, "rxq%d_consumer_index", q_num); +} + +static void bnad_get_txq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "txq%d_packets", q_num); + ethtool_sprintf(string, "txq%d_bytes", q_num); + ethtool_sprintf(string, "txq%d_producer_index", q_num); + ethtool_sprintf(string, "txq%d_consumer_index", q_num); + ethtool_sprintf(string, "txq%d_hw_consumer_index", q_num); +} + static void bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) { @@ -531,175 +593,57 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) int i, j, q_num; u32 bmap; + if (stringset != ETH_SS_STATS) + return; + mutex_lock(&bnad->conf_mutex); - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { - BUG_ON(!(strlen(bnad_net_stats_strings[i]) < - ETH_GSTRING_LEN)); - strncpy(string, bnad_net_stats_strings[i], - ETH_GSTRING_LEN); - string += ETH_GSTRING_LEN; - } - bmap = bna_tx_rid_mask(&bnad->bna); - for (i = 0; bmap; i++) { - if (bmap & 1) { - sprintf(string, "txf%d_ucast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_ucast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_ucast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_mcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_mcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_mcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_bcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_bcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_bcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_errors", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_filter_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_filter_mac_sa", i); - string += ETH_GSTRING_LEN; - } - bmap >>= 1; - } + for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { + BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN)); + ethtool_sprintf(&string, bnad_net_stats_strings[i]); + } - bmap = bna_rx_rid_mask(&bnad->bna); - for (i = 0; bmap; i++) { - if (bmap & 1) { - sprintf(string, "rxf%d_ucast_octets", i); - string += ETH_GSTRING_LEN; - 
sprintf(string, "rxf%d_ucast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_ucast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_mcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_mcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_mcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_bcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_bcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_bcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_frame_drops", i); - string += ETH_GSTRING_LEN; - } - bmap >>= 1; - } + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + bnad_get_txf_strings(&string, i); + bmap >>= 1; + } - q_num = 0; - for (i = 0; i < bnad->num_rx; i++) { - if (!bnad->rx_info[i].rx) - continue; - for (j = 0; j < bnad->num_rxp_per_rx; j++) { - sprintf(string, "cq%d_producer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_consumer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_hw_producer_index", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_intr", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_poll", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_schedule", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_keep_poll", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_complete", q_num); - string += ETH_GSTRING_LEN; - q_num++; - } - } + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++, bmap >>= 1) { + if (bmap & 1) + bnad_get_rxf_strings(&string, i); + bmap >>= 1; + } - q_num = 0; - for (i = 0; i < bnad->num_rx; i++) { - if (!bnad->rx_info[i].rx) - continue; - for (j = 0; j < bnad->num_rxp_per_rx; j++) { - sprintf(string, "rxq%d_packets", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_bytes", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_packets_with_error", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_allocbuf_failed", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_mapbuf_failed", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_producer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_consumer_index", q_num); - string += ETH_GSTRING_LEN; - q_num++; - if (bnad->rx_info[i].rx_ctrl[j].ccb && - bnad->rx_info[i].rx_ctrl[j].ccb-> - rcb[1] && - bnad->rx_info[i].rx_ctrl[j].ccb-> - rcb[1]->rxq) { - sprintf(string, "rxq%d_packets", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_bytes", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, - "rxq%d_packets_with_error", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_allocbuf_failed", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_mapbuf_failed", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_producer_index", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_consumer_index", - q_num); - string += ETH_GSTRING_LEN; - q_num++; - } - } - } + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + bnad_get_cq_strings(&string, q_num++); + } - q_num = 0; - for (i = 0; i < bnad->num_tx; i++) { - if (!bnad->tx_info[i].tx) - continue; - for (j = 0; j < bnad->num_txq_per_tx; j++) { - sprintf(string, "txq%d_packets", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_bytes", q_num); - string += ETH_GSTRING_LEN; - 
sprintf(string, "txq%d_producer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_consumer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_hw_consumer_index", - q_num); - string += ETH_GSTRING_LEN; - q_num++; - } + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + bnad_get_rxq_strings(&string, q_num++); + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) + bnad_get_rxq_strings(&string, q_num++); } + } - break; - - default: - break; + q_num = 0; + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) + bnad_get_txq_strings(&string, q_num++); } mutex_unlock(&bnad->conf_mutex); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d8c68906525a..d8d87213697c 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -159,6 +159,16 @@ #define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ #define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ #define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */ +#define GEM_PCSCNTRL 0x0200 /* PCS Control */ +#define GEM_PCSSTS 0x0204 /* PCS Status */ +#define GEM_PCSPHYTOPID 0x0208 /* PCS PHY Top ID */ +#define GEM_PCSPHYBOTID 0x020c /* PCS PHY Bottom ID */ +#define GEM_PCSANADV 0x0210 /* PCS AN Advertisement */ +#define GEM_PCSANLPBASE 0x0214 /* PCS AN Link Partner Base */ +#define GEM_PCSANEXP 0x0218 /* PCS AN Expansion */ +#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */ +#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */ +#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */ #define GEM_DCFG1 0x0280 /* Design Config 1 */ #define GEM_DCFG2 0x0284 /* Design Config 2 */ #define GEM_DCFG3 0x0288 /* Design Config 3 */ @@ -478,6 +488,10 @@ #define GEM_HS_MAC_SPEED_OFFSET 0 #define GEM_HS_MAC_SPEED_SIZE 3 +/* Bitfields in PCSCNTRL */ +#define GEM_PCSAUTONEG_OFFSET 12 +#define GEM_PCSAUTONEG_SIZE 1 + /* Bitfields in DCFG1. */ #define GEM_IRQCOR_OFFSET 23 #define GEM_IRQCOR_SIZE 1 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 0f6a6cb7e98d..0e94db9cd45d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -694,6 +694,22 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, if (old_ncr ^ ncr) macb_or_gem_writel(bp, NCR, ncr); + /* Disable AN for SGMII fixed link configuration, enable otherwise. + * Must be written after PCSSEL is set in NCFGR, + * otherwise writes will not take effect. + */ + if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { + u32 pcsctrl, old_pcsctrl; + + old_pcsctrl = gem_readl(bp, PCSCNTRL); + if (mode == MLO_AN_FIXED) + pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG); + else + pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG); + if (old_pcsctrl != pcsctrl) + gem_writel(bp, PCSCNTRL, pcsctrl); + } + spin_unlock_irqrestore(&bp->lock, flags); } @@ -847,6 +863,15 @@ static int macb_phylink_connect(struct macb *bp) return 0; } +static void macb_get_pcs_fixed_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct macb *bp = netdev_priv(ndev); + + state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; +} + /* based on au1000_eth. 
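The bnad_get_strings() rewrite above leans on ethtool_sprintf(), which formats into the current ETH_GSTRING_LEN slot and advances the cursor, replacing each old sprintf()/pointer-bump pair. Minimal usage sketch (example helper, not driver code):

static void example_get_queue_strings(u8 **string, int q)
{
	/* each call consumes exactly one ETH_GSTRING_LEN slot and moves
	 * *string forward, so callers simply chain the calls */
	ethtool_sprintf(string, "q%d_packets", q);
	ethtool_sprintf(string, "q%d_bytes", q);
}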
c*/ static int macb_mii_probe(struct net_device *dev) { @@ -855,6 +880,11 @@ static int macb_mii_probe(struct net_device *dev) bp->phylink_config.dev = &dev->dev; bp->phylink_config.type = PHYLINK_NETDEV; + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { + bp->phylink_config.poll_fixed_state = true; + bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; + } + bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, bp->phy_interface, &macb_phylink_ops); if (IS_ERR(bp->phylink)) { @@ -3735,17 +3765,15 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, *hclk = devm_clk_get(&pdev->dev, "hclk"); } - if (IS_ERR_OR_NULL(*pclk)) { - err = IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV; - dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); - return err; - } + if (IS_ERR_OR_NULL(*pclk)) + return dev_err_probe(&pdev->dev, + IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV, + "failed to get pclk\n"); - if (IS_ERR_OR_NULL(*hclk)) { - err = IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV; - dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); - return err; - } + if (IS_ERR_OR_NULL(*hclk)) + return dev_err_probe(&pdev->dev, + IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV, + "failed to get hclk\n"); *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); if (IS_ERR(*tx_clk)) @@ -4621,7 +4649,6 @@ static int macb_probe(struct platform_device *pdev) struct net_device *dev; struct resource *regs; void __iomem *mem; - const char *mac; struct macb *bp; int err, val; @@ -4736,15 +4763,11 @@ static int macb_probe(struct platform_device *pdev) if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) bp->rx_intr_mask |= MACB_BIT(RXUBR); - mac = of_get_mac_address(np); - if (PTR_ERR(mac) == -EPROBE_DEFER) { - err = -EPROBE_DEFER; + err = of_get_mac_address(np, bp->dev->dev_addr); + if (err == -EPROBE_DEFER) goto err_out_free_netdev; - } else if (!IS_ERR_OR_NULL(mac)) { - ether_addr_copy(bp->dev->dev_addr, mac); - } else { + else if (err) macb_get_hwaddr(bp); - } err = of_get_phy_mode(np, &interface); if (err) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h index e6d4ad99cc38..3f1c189646f4 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h @@ -521,7 +521,7 @@ #define CN23XX_BAR1_INDEX_OFFSET 3 #define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \ - (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \ + (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \ ((idx) << CN23XX_BAR1_INDEX_OFFSET)) /*############################ DPI #########################*/ diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index ecffebd513be..48ff6fb0eed9 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1385,7 +1385,6 @@ static int octeon_mgmt_probe(struct platform_device *pdev) struct net_device *netdev; struct octeon_mgmt *p; const __be32 *data; - const u8 *mac; struct resource *res_mix; struct resource *res_agl; struct resource *res_agl_prt_ctl; @@ -1502,11 +1501,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; - mac = of_get_mac_address(pdev->dev.of_node); - - if (!IS_ERR(mac)) - ether_addr_copy(netdev->dev_addr, mac); - else + result = of_get_mac_address(pdev->dev.of_node, 
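macb_clk_init() above switches to dev_err_probe(), which returns the error it is given, logs the message for real failures, and stays quiet for -EPROBE_DEFER while recording the reason for the deferred-probe report. Typical pattern (the clock name is only an example):

	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get pclk\n");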
netdev->dev_addr); + if (result) eth_hw_addr_random(netdev); p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index f782e6af45e9..50bbe79fb93d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; mbx.rq.rq_num = qidx; - mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | + mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8ff28ed04b7f..0c783aadf393 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1474,7 +1474,6 @@ static int bgx_init_of_phy(struct bgx *bgx) device_for_each_child_node(&bgx->pdev->dev, fwn) { struct phy_device *pd; struct device_node *phy_np; - const char *mac; /* Should always be an OF node. But if it is not, we * cannot handle it, so exit the loop. @@ -1483,9 +1482,7 @@ static int bgx_init_of_phy(struct bgx *bgx) if (!node) break; - mac = of_get_mac_address(node); - if (!IS_ERR(mac)) - ether_addr_copy(bgx->lmac[lmac].mac, mac); + of_get_mac_address(node, bgx->lmac[lmac].mac); SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index ce28820c57c9..12fcf84d67ad 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -323,8 +323,7 @@ void t4_cleanup_clip_tbl(struct adapter *adap) struct clip_tbl *ctbl = adap->clipt; if (ctbl) { - if (ctbl->cl_list) - kvfree(ctbl->cl_list); + kvfree(ctbl->cl_list); kvfree(ctbl); } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 23a2ebdfd503..a7f291c89702 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -3551,8 +3551,7 @@ out: } out_free: - if (data) - kvfree(data); + kvfree(data); #undef QDESC_GET_FLQ #undef QDESC_GET_RXQ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 77648e4ab4cc..dd66b244466d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -157,8 +157,7 @@ static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init) static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init) { - if (pdbg_init->compress_buff) - vfree(pdbg_init->compress_buff); + vfree(pdbg_init->compress_buff); } int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 83b46440408b..bc581b149b11 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, WORD_MASK, f->fs.nat_lip[15] | f->fs.nat_lip[14] << 8 | 
f->fs.nat_lip[13] << 16 | - f->fs.nat_lip[12] << 24, 1); + (u64)f->fs.nat_lip[12] << 24, 1); set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1, WORD_MASK, f->fs.nat_lip[11] | f->fs.nat_lip[10] << 8 | f->fs.nat_lip[9] << 16 | - f->fs.nat_lip[8] << 24, 1); + (u64)f->fs.nat_lip[8] << 24, 1); set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2, WORD_MASK, f->fs.nat_lip[7] | f->fs.nat_lip[6] << 8 | f->fs.nat_lip[5] << 16 | - f->fs.nat_lip[4] << 24, 1); + (u64)f->fs.nat_lip[4] << 24, 1); set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3, WORD_MASK, f->fs.nat_lip[3] | f->fs.nat_lip[2] << 8 | f->fs.nat_lip[1] << 16 | - f->fs.nat_lip[0] << 24, 1); + (u64)f->fs.nat_lip[0] << 24, 1); } else { set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W, WORD_MASK, f->fs.nat_lip[3] | f->fs.nat_lip[2] << 8 | f->fs.nat_lip[1] << 16 | - f->fs.nat_lip[0] << 24, 1); + (u64)f->fs.nat_lip[0] << 24, 1); } } @@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, WORD_MASK, f->fs.nat_fip[15] | f->fs.nat_fip[14] << 8 | f->fs.nat_fip[13] << 16 | - f->fs.nat_fip[12] << 24, 1); + (u64)f->fs.nat_fip[12] << 24, 1); set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1, WORD_MASK, f->fs.nat_fip[11] | f->fs.nat_fip[10] << 8 | f->fs.nat_fip[9] << 16 | - f->fs.nat_fip[8] << 24, 1); + (u64)f->fs.nat_fip[8] << 24, 1); set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2, WORD_MASK, f->fs.nat_fip[7] | f->fs.nat_fip[6] << 8 | f->fs.nat_fip[5] << 16 | - f->fs.nat_fip[4] << 24, 1); + (u64)f->fs.nat_fip[4] << 24, 1); set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3, WORD_MASK, f->fs.nat_fip[3] | f->fs.nat_fip[2] << 8 | f->fs.nat_fip[1] << 16 | - f->fs.nat_fip[0] << 24, 1); + (u64)f->fs.nat_fip[0] << 24, 1); } else { set_tcb_field(adap, f, tid, @@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, WORD_MASK, f->fs.nat_fip[3] | f->fs.nat_fip[2] << 8 | f->fs.nat_fip[1] << 16 | - f->fs.nat_fip[0] << 24, 1); + (u64)f->fs.nat_fip[0] << 24, 1); } } set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) | - (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0), + (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0), 1); } @@ -979,7 +979,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) { struct port_info *pi = netdev_priv(f->dev); - /* If the new or old filter have loopback rewriteing rules then we'll + /* If the new or old filter have loopback rewriting rules then we'll * need to free any existing L2T, SMT, CLIP entries of filter * rule.
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 2e309f6673f7..28fd2de9e4cf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -48,6 +48,11 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, flow_action_for_each(i, entry, actions) { switch (entry->id) { case FLOW_ACTION_POLICE: + if (entry->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } /* Convert bytes per second to bits per second */ if (entry->police.rate_bytes_ps * 8 > max_link_rate) { NL_SET_ERR_MSG_MOD(extack, @@ -145,7 +150,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, flow_action_for_each(i, entry, &cls->rule->action) if (entry->id == FLOW_ACTION_POLICE) break; - + if (entry->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } /* Convert from bytes per second to Kbps */ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); p.u.params.channel = pi->tx_chan; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index dede02505ceb..a5d2f84dcdd5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -524,13 +524,9 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) out_no_mem: for (i = 0; i < t->size; i++) { struct cxgb4_link *link = &t->table[i]; - - if (link->tid_map) - kvfree(link->tid_map); + kvfree(link->tid_map); } - - if (t) - kvfree(t); + kvfree(t); return NULL; } diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index a3f5b80888e5..ef3f1e92632f 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -33,7 +33,6 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len) if (unlikely(start < skb_linear_data_len)) { frag_size = min(len, skb_linear_data_len - start); - start = 0; } else { start -= skb_linear_data_len; @@ -873,10 +872,10 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info, } /* update receive window */ if (first_wr || tx_info->prev_win != tcp_win) { - pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, - TCB_RCV_WND_W, - TCB_RCV_WND_V(TCB_RCV_WND_M), - TCB_RCV_WND_V(tcp_win), 0); + chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, + TCB_RCV_WND_W, + TCB_RCV_WND_V(TCB_RCV_WND_M), + TCB_RCV_WND_V(tcp_win), 0); tx_info->prev_win = tcp_win; cpl++; } @@ -1485,7 +1484,6 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info, wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16)); wr->cookie = 0; - pos += sizeof(*wr); /* ULP_TXPKT */ ulptx = (struct ulp_txpkt *)(wr + 1); ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index f04ec53544ae..f48957a17c3a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -3040,15 +3040,4 @@ static struct pci_driver enic_driver = { .remove = enic_remove, }; -static int __init enic_init_module(void) -{ - return 
pci_register_driver(&enic_driver); -} - -static void __exit enic_cleanup_module(void) -{ - pci_unregister_driver(&enic_driver); -} - -module_init(enic_init_module); -module_exit(enic_cleanup_module); +module_pci_driver(enic_driver); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8a9096aa85cd..2a8bf53c2f75 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1385,7 +1385,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) { struct dm9000_plat_data *pdata; struct device_node *np = dev->of_node; - const void *mac_addr; + int ret; if (!IS_ENABLED(CONFIG_OF) || !np) return ERR_PTR(-ENXIO); @@ -1399,11 +1399,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) if (of_find_property(np, "davicom,no-eeprom", NULL)) pdata->flags |= DM9000_PLATF_NO_EEPROM; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(pdata->dev_addr, mac_addr); - else if (PTR_ERR(mac_addr) == -EPROBE_DEFER) - return ERR_CAST(mac_addr); + ret = of_get_mac_address(np, pdata->dev_addr); + if (ret == -EPROBE_DEFER) + return ERR_PTR(ret); return pdata; } @@ -1524,7 +1522,6 @@ dm9000_probe(struct platform_device *pdev) if (ret) { dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", db->irq_wake, ret); - ret = 0; } else { irq_set_irq_wake(db->irq_wake, 0); db->wake_supported = 1; diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index c3cbe55205a7..b018195f0243 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -2193,15 +2193,4 @@ static struct pci_driver de_driver = { .driver.pm = &de_pm_ops, }; -static int __init de_init (void) -{ - return pci_register_driver(&de_driver); -} - -static void __exit de_exit (void) -{ - pci_unregister_driver (&de_driver); -} - -module_init(de_init); -module_exit(de_exit); +module_pci_driver(de_driver); diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 89cbdc1f4857..514df170ec5d 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1629,15 +1629,4 @@ static struct pci_driver w840_driver = { .driver.pm = &w840_pm_ops, }; -static int __init w840_init(void) -{ - return pci_register_driver(&w840_driver); -} - -static void __exit w840_exit(void) -{ - pci_unregister_driver(&w840_driver); -} - -module_init(w840_init); -module_exit(w840_exit); +module_pci_driver(w840_driver); diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index df0eab479d51..ce61f79f3b7c 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1982,17 +1982,4 @@ static struct pci_driver sundance_driver = { .driver.pm = &sundance_pm_ops, }; -static int __init sundance_init(void) -{ - return pci_register_driver(&sundance_driver); -} - -static void __exit sundance_exit(void) -{ - pci_unregister_driver(&sundance_driver); -} - -module_init(sundance_init); -module_exit(sundance_exit); - - +module_pci_driver(sundance_driver); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 3d9b0b161e24..e1b43b07755b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1151,11 +1151,7 @@ static int ethoc_probe(struct platform_device *pdev) ether_addr_copy(netdev->dev_addr, pdata->hwaddr); priv->phy_id = pdata->phy_id; } else { - const void *mac; - - 
mac = of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(mac)) - ether_addr_copy(netdev->dev_addr, mac); + of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); priv->phy_id = -1; } diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 815fb62c4b02..e3954d8835e7 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -575,7 +575,6 @@ static s32 nps_enet_probe(struct platform_device *pdev) struct net_device *ndev; struct nps_enet_priv *priv; s32 err = 0; - const char *mac_addr; if (!dev->of_node) return -ENODEV; @@ -602,10 +601,8 @@ static s32 nps_enet_probe(struct platform_device *pdev) dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base); /* set kernel MAC address to dev */ - mac_addr = of_get_mac_address(dev->of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - else + err = of_get_mac_address(dev->of_node, ndev->dev_addr); + if (err) eth_hw_addr_random(ndev); /* Get IRQ number */ diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 473b337b2e3b..5a1a8f2ea63c 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1177,18 +1177,7 @@ static struct platform_driver ftmac100_driver = { /****************************************************************************** * initialization / finalization *****************************************************************************/ -static int __init ftmac100_init(void) -{ - return platform_driver_register(&ftmac100_driver); -} - -static void __exit ftmac100_exit(void) -{ - platform_driver_unregister(&ftmac100_driver); -} - -module_init(ftmac100_init); -module_exit(ftmac100_exit); +module_platform_driver(ftmac100_driver); MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); MODULE_DESCRIPTION("FTMAC100 driver"); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index c696651dd735..0908771aa9ac 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1948,15 +1948,4 @@ static struct pci_driver fealnx_driver = { .remove = fealnx_remove_one, }; -static int __init fealnx_init(void) -{ - return pci_register_driver(&fealnx_driver); -} - -static void __exit fealnx_exit(void) -{ - pci_unregister_driver(&fealnx_driver); -} - -module_init(fealnx_init); -module_exit(fealnx_exit); +module_pci_driver(fealnx_driver); diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 3f9175bdce77..2d1abdd58fab 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -27,6 +27,7 @@ config FEC default ARCH_MXC || SOC_IMX28 if ARM select CRC32 select PHYLIB + imply NET_SELFTESTS imply PTP_1588_CLOCK help Say Y here if you want to use the built-in 10/100 Fast ethernet diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 67c436400352..de7b31842233 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/ -obj-$(CONFIG_FSL_ENETC) += enetc/ -obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/ -obj-$(CONFIG_FSL_ENETC_VF) += enetc/ +obj-y += enetc/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 720dc99bd1fc..177c020bf34a 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ 
b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -3081,7 +3081,7 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n, struct xdp_frame **frames, u32 flags) { struct xdp_frame *xdpf; - int i, err, drops = 0; + int i, nxmit = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -3091,14 +3091,12 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n, for (i = 0; i < n; i++) { xdpf = frames[i]; - err = dpaa_xdp_xmit_frame(net_dev, xdpf); - if (err) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (dpaa_xdp_xmit_frame(net_dev, xdpf)) + break; + nxmit++; } - return n - drops; + return nxmit; } static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig index ee7a906e30b3..d029b69c3f18 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -29,3 +29,11 @@ config FSL_DPAA2_PTP_CLOCK help This driver adds support for using the DPAA2 1588 timer module as a PTP clock. + +config FSL_DPAA2_SWITCH + tristate "Freescale DPAA2 Ethernet Switch" + depends on BRIDGE || BRIDGE=n + depends on NET_SWITCHDEV + help + Driver for Freescale DPAA2 Ethernet Switch. This driver manages + switch objects discovered on the Freeescale MC bus. diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 146cb3540e61..c2ef74052ef8 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -5,11 +5,13 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o +obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 492943bb9c48..e0c3c58e2ac7 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -223,31 +223,31 @@ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, } } -static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) +static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int err; - ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr; - if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD) + ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr; + if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD) return; while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, - ch->xdp.drop_bufs, - ch->xdp.drop_cnt)) == -EBUSY) { + ch->recycled_bufs, + ch->recycled_bufs_cnt)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) break; cpu_relax(); } if (err) { - dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); - ch->buf_count -= ch->xdp.drop_cnt; + dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt); + ch->buf_count -= ch->recycled_bufs_cnt; } - ch->xdp.drop_cnt = 0; + ch->recycled_bufs_cnt = 0; } 
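The dpaa_xdp_xmit() hunk above converts the driver to the newer ndo_xdp_xmit() return convention: the driver reports how many frames it actually queued (nxmit), stops at the first enqueue failure, and no longer frees the leftover frames itself; releasing them is now the caller's job. Below is a minimal caller-side sketch of that contract; the helper name xdp_flush_frames() is purely illustrative, while ndo_xdp_xmit() and xdp_return_frame_rx_napi() are the existing kernel interfaces involved.

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical caller of ndo_xdp_xmit() under the new convention: the
 * driver consumes the first 'sent' frames and the caller returns the
 * remainder to the XDP memory allocator.
 */
static void xdp_flush_frames(struct net_device *dev,
			     struct xdp_frame **frames, int n, u32 flags)
{
	int sent, i;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);
	if (sent < 0)
		sent = 0;	/* hard error: no frame was accepted */

	for (i = sent; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);
}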
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv, @@ -300,7 +300,7 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, ch->stats.xdp_tx++; } for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { - dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); percpu_stats->tx_errors++; ch->stats.xdp_tx_err++; } @@ -382,7 +382,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); fallthrough; case XDP_DROP: - dpaa2_eth_xdp_release_buf(priv, ch, addr); + dpaa2_eth_recycle_buf(priv, ch, addr); ch->stats.xdp_drop++; break; case XDP_REDIRECT: @@ -403,7 +403,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, free_pages((unsigned long)vaddr, 0); } else { ch->buf_count++; - dpaa2_eth_xdp_release_buf(priv, ch, addr); + dpaa2_eth_recycle_buf(priv, ch, addr); } ch->stats.xdp_drop++; } else { @@ -418,6 +418,35 @@ out: return xdp_act; } +static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + u16 fd_offset = dpaa2_fd_get_offset(fd); + struct dpaa2_eth_priv *priv = ch->priv; + u32 fd_length = dpaa2_fd_get_len(fd); + struct sk_buff *skb = NULL; + unsigned int skb_len; + + if (fd_length > priv->rx_copybreak) + return NULL; + + skb_len = fd_length + dpaa2_eth_needed_headroom(NULL); + + skb = napi_alloc_skb(&ch->napi, skb_len); + if (!skb) + return NULL; + + skb_reserve(skb, dpaa2_eth_needed_headroom(NULL)); + skb_put(skb, fd_length); + + memcpy(skb->data, fd_vaddr + fd_offset, fd_length); + + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); + + return skb; +} + /* Main Rx frame processing routine */ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch, @@ -459,9 +488,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, return; } - dma_unmap_page(dev, addr, priv->rx_buf_size, - DMA_BIDIRECTIONAL); - skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + skb = dpaa2_eth_copybreak(ch, fd, vaddr); + if (!skb) { + dma_unmap_page(dev, addr, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + } } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); @@ -2431,8 +2463,6 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, percpu_stats->tx_packets += enqueued; for (i = 0; i < enqueued; i++) percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); - for (i = enqueued; i < n; i++) - xdp_return_frame_rx_napi(frames[i]); return enqueued; } @@ -4304,6 +4334,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) skb_queue_head_init(&priv->tx_skbs); + priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK; + /* Obtain a MC portal */ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &priv->mc_io); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 9b6a89709ce1..cdb623d5f2c1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -438,8 +438,6 @@ struct dpaa2_eth_fq { struct dpaa2_eth_ch_xdp { struct bpf_prog *prog; - u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD]; - int drop_cnt; unsigned int res; }; @@ -457,6 +455,10 @@ struct dpaa2_eth_channel { struct dpaa2_eth_ch_xdp xdp; struct xdp_rxq_info xdp_rxq; struct list_head *rx_list; + + /* Buffers to be recycled back in the buffer pool */ + u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD]; + int 
recycled_bufs_cnt; }; struct dpaa2_eth_dist_fields { @@ -487,6 +489,8 @@ struct dpaa2_eth_trap_data { struct dpaa2_eth_priv *priv; }; +#define DPAA2_ETH_DEFAULT_COPYBREAK 512 + /* Driver private data */ struct dpaa2_eth_priv { struct net_device *net_dev; @@ -567,6 +571,8 @@ struct dpaa2_eth_priv { struct devlink *devlink; struct dpaa2_eth_trap_data *trap_data; struct devlink_port devlink_port; + + u32 rx_copybreak; }; struct dpaa2_eth_devlink_priv { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index bf59708b869e..ad5e374eeccf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -782,6 +782,44 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev, return 0; } +static int dpaa2_eth_get_tunable(struct net_device *net_dev, + const struct ethtool_tunable *tuna, + void *data) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int dpaa2_eth_set_tunable(struct net_device *net_dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + priv->rx_copybreak = *(u32 *)data; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + const struct ethtool_ops dpaa2_ethtool_ops = { .get_drvinfo = dpaa2_eth_get_drvinfo, .nway_reset = dpaa2_eth_nway_reset, @@ -796,4 +834,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .get_rxnfc = dpaa2_eth_get_rxnfc, .set_rxnfc = dpaa2_eth_set_rxnfc, .get_ts_info = dpaa2_eth_get_ts_info, + .get_tunable = dpaa2_eth_get_tunable, + .set_tunable = dpaa2_eth_set_tunable, }; diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 0af2e9914ec4..70e04321c420 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -9,7 +9,7 @@ #include <linux/ethtool.h> -#include "ethsw.h" +#include "dpaa2-switch.h" static struct { enum dpsw_counter id; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c new file mode 100644 index 000000000000..f9451ec5f2cb --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DPAA2 Ethernet Switch flower support + * + * Copyright 2021 NXP + * + */ + +#include "dpaa2-switch.h" + +static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, + struct dpsw_acl_key *acl_key) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = cls->common.extack; + struct dpsw_acl_fields *acl_h, *acl_m; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + acl_h = &acl_key->match; + acl_m = &acl_key->mask; + + if 
(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + acl_h->l3_protocol = match.key->ip_proto; + acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto); + acl_m->l3_protocol = match.mask->ip_proto; + acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]); + ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]); + ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]); + ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + acl_h->l2_vlan_id = match.key->vlan_id; + acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid); + acl_h->l2_pcp_dei = match.key->vlan_priority << 1 | + match.key->vlan_dei; + + acl_m->l2_vlan_id = match.mask->vlan_id; + acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid); + acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 | + match.mask->vlan_dei; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + acl_h->l3_source_ip = be32_to_cpu(match.key->src); + acl_h->l3_dest_ip = be32_to_cpu(match.key->dst); + acl_m->l3_source_ip = be32_to_cpu(match.mask->src); + acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + acl_h->l4_source_port = be16_to_cpu(match.key->src); + acl_h->l4_dest_port = be16_to_cpu(match.key->dst); + acl_m->l4_source_port = be16_to_cpu(match.mask->src); + acl_m->l4_dest_port = be16_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + if (match.mask->ttl != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL not supported"); + return -EOPNOTSUPP; + } + + if ((match.mask->tos & 0x3) != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on ECN not supported, only DSCP"); + return -EOPNOTSUPP; + } + + acl_h->l3_dscp = match.key->tos >> 2; + acl_m->l3_dscp = match.mask->tos >> 2; + } + + return 0; +} + +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpsw_acl_key *acl_key = &entry->key; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); + if (!cmd_buff) + return -ENOMEM; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "DMA mapping failed\n"); + return -EFAULT; + } + + err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + DMA_TO_DEVICE); + if (err) { + dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl 
*acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct dpsw_acl_key *acl_key = &entry->key; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); + if (!cmd_buff) + return -ENOMEM; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "DMA mapping failed\n"); + return -EFAULT; + } + + err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + DMA_TO_DEVICE); + if (err) { + dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int +dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + struct list_head *pos, *n; + int index = 0; + + if (list_empty(&acl_tbl->entries)) { + list_add(&entry->list, &acl_tbl->entries); + return index; + } + + list_for_each_safe(pos, n, &acl_tbl->entries) { + tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list); + if (entry->prio < tmp->prio) + break; + index++; + } + list_add(&entry->list, pos->prev); + return index; +} + +static struct dpaa2_switch_acl_entry* +dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, + int index) +{ + struct dpaa2_switch_acl_entry *tmp; + int i = 0; + + list_for_each_entry(tmp, &acl_tbl->entries, list) { + if (i == index) + return tmp; + ++i; + } + + return NULL; +} + +static int +dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry, + int precedence) +{ + int err; + + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + entry->cfg.precedence = precedence; + return dpaa2_switch_acl_entry_add(acl_tbl, entry); +} + +static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + /* Add the new ACL entry to the linked list and get its index */ + index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry); + + /* Move up in priority the ACL entries to make space + * for the new filter. 
+ */ + precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1; + for (i = 0; i < index; i++) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence++; + } + + /* Add the new entry to hardware */ + entry->cfg.precedence = precedence; + err = dpaa2_switch_acl_entry_add(acl_tbl, entry); + acl_tbl->num_rules++; + + return err; +} + +static struct dpaa2_switch_acl_entry * +dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, + unsigned long cookie) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == cookie) + return tmp; + } + return NULL; +} + +static int +dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + int index = 0; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == entry->cookie) + return index; + index++; + } + return -ENOENT; +} + +static int +dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry); + + /* Remove from hardware the ACL entry */ + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + acl_tbl->num_rules--; + + /* Remove it from the list also */ + list_del(&entry->list); + + /* Move down in priority the entries over the deleted one */ + precedence = entry->cfg.precedence; + for (i = index - 1; i >= 0; i--) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence--; + } + + kfree(entry); + + return 0; +} + +static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw, + struct flow_action_entry *cls_act, + struct dpsw_acl_result *dpsw_act, + struct netlink_ext_ack *extack) +{ + int err = 0; + + switch (cls_act->id) { + case FLOW_ACTION_TRAP: + dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; + break; + case FLOW_ACTION_REDIRECT: + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + + dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + dpsw_act->action = DPSW_ACL_ACTION_REDIRECT; + break; + case FLOW_ACTION_DROP: + dpsw_act->action = DPSW_ACL_ACTION_DROP; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Action not supported"); + err = -EOPNOTSUPP; + goto out; + } + +out: + return err; +} + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); + return -ENOMEM; + } + + acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL); + if (!acl_entry) + return -ENOMEM; + + err = dpaa2_switch_flower_parse_key(cls, 
&acl_entry->key); + if (err) + goto free_acl_entry; + + act = &rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&cls->rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); + return -ENOMEM; + } + + acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL); + if (!acl_entry) + return -ENOMEM; + + act = &cls->rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c new file mode 100644 index 000000000000..05de37c3b64c --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -0,0 +1,3394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DPAA2 Ethernet Switch driver + * + * Copyright 2014-2016 Freescale Semiconductor Inc. 
+ * Copyright 2017-2021 NXP + * + */ + +#include <linux/module.h> + +#include <linux/interrupt.h> + +#include <linux/msi.h> +#include <linux/kthread.h> +#include <linux/workqueue.h> +#include <linux/iommu.h> +#include <net/pkt_cls.h> + +#include <linux/fsl/mc.h> + +#include "dpaa2-switch.h" + +/* Minimal supported DPSW version */ +#define DPSW_MIN_VER_MAJOR 8 +#define DPSW_MIN_VER_MINOR 9 + +#define DEFAULT_VLAN_ID 1 + +static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv) +{ + return port_priv->fdb->fdb_id; +} + +static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (!ethsw->fdbs[i].in_use) + return &ethsw->fdbs[i]; + return NULL; +} + +static struct dpaa2_switch_acl_tbl * +dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (!ethsw->acls[i].in_use) + return &ethsw->acls[i]; + return NULL; +} + +static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, + struct net_device *bridge_dev) +{ + struct ethsw_port_priv *other_port_priv = NULL; + struct dpaa2_switch_fdb *fdb; + struct net_device *other_dev; + struct list_head *iter; + + /* If we leave a bridge (bridge_dev is NULL), find an unused + * FDB and use that. + */ + if (!bridge_dev) { + fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data); + + /* If there is no unused FDB, we must be the last port that + * leaves the last bridge, all the others are standalone. We + * can just keep the FDB that we already have. + */ + + if (!fdb) { + port_priv->fdb->bridge_dev = NULL; + return 0; + } + + port_priv->fdb = fdb; + port_priv->fdb->in_use = true; + port_priv->fdb->bridge_dev = NULL; + return 0; + } + + /* The below call to netdev_for_each_lower_dev() demands the RTNL lock + * being held. Assert on it so that it's easier to catch new code + * paths that reach this point without the RTNL lock. + */ + ASSERT_RTNL(); + + /* If part of a bridge, use the FDB of the first dpaa2 switch interface + * to be present in that bridge + */ + netdev_for_each_lower_dev(bridge_dev, other_dev, iter) { + if (!dpaa2_switch_port_dev_check(other_dev)) + continue; + + if (other_dev == port_priv->netdev) + continue; + + other_port_priv = netdev_priv(other_dev); + break; + } + + /* The current port is about to change its FDB to the one used by the + * first port that joined the bridge. + */ + if (other_port_priv) { + /* The previous FDB is about to become unused, since the + * interface is no longer standalone.
+ */ + port_priv->fdb->in_use = false; + port_priv->fdb->bridge_dev = NULL; + + /* Get a reference to the new FDB */ + port_priv->fdb = other_port_priv->fdb; + } + + /* Keep track of the new upper bridge device */ + port_priv->fdb->bridge_dev = bridge_dev; + + return 0; +} + +static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, + enum dpsw_flood_type type, + struct dpsw_egress_flood_cfg *cfg) +{ + int i = 0, j; + + memset(cfg, 0, sizeof(*cfg)); + + /* Add all the DPAA2 switch ports found in the same bridging domain to + * the egress flooding domain + */ + for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { + if (!ethsw->ports[j]) + continue; + if (ethsw->ports[j]->fdb->fdb_id != fdb_id) + continue; + + if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) + cfg->if_id[i++] = ethsw->ports[j]->idx; + else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) + cfg->if_id[i++] = ethsw->ports[j]->idx; + } + + /* Add the CTRL interface to the egress flooding domain */ + cfg->if_id[i++] = ethsw->sw_attr.num_ifs; + + cfg->fdb_id = fdb_id; + cfg->flood_type = type; + cfg->num_ifs = i; +} + +static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) +{ + struct dpsw_egress_flood_cfg flood_cfg; + int err; + + /* Setup broadcast flooding domain */ + dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); + err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, + &flood_cfg); + if (err) { + dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); + return err; + } + + /* Setup unknown flooding domain */ + dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); + err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, + &flood_cfg); + if (err) { + dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); + return err; + } + + return 0; +} + +static void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; + + return phys_to_virt(phys_addr); +} + +static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpsw_vlan_cfg vcfg = {0}; + int err; + + vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_vlan_add(ethsw->mc_io, 0, + ethsw->dpsw_handle, vid, &vcfg); + if (err) { + dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); + return err; + } + ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; + + return 0; +} + +static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) +{ + struct net_device *netdev = port_priv->netdev; + struct dpsw_link_state state; + int err; + + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, &state); + if (err) { + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); + return true; + } + + WARN_ONCE(state.up > 1, "Garbage read into link_state"); + + return state.up ? 
true : false; +} + +static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_tci_cfg tci_cfg = { 0 }; + bool up; + int err, ret; + + err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, &tci_cfg); + if (err) { + netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); + return err; + } + + tci_cfg.vlan_id = pvid; + + /* Interface needs to be down to change PVID */ + up = dpaa2_switch_port_is_up(port_priv); + if (up) { + err = dpsw_if_disable(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx); + if (err) { + netdev_err(netdev, "dpsw_if_disable err %d\n", err); + return err; + } + } + + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, &tci_cfg); + if (err) { + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); + goto set_tci_error; + } + + /* Delete previous PVID info and mark the new one */ + port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; + port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; + port_priv->pvid = pvid; + +set_tci_error: + if (up) { + ret = dpsw_if_enable(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx); + if (ret) { + netdev_err(netdev, "dpsw_if_enable err %d\n", ret); + return ret; + } + } + + return err; +} + +static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, + u16 vid, u16 flags) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_vlan_if_cfg vcfg = {0}; + int err; + + if (port_priv->vlans[vid]) { + netdev_warn(netdev, "VLAN %d already configured\n", vid); + return -EEXIST; + } + + /* If hit, this VLAN rule will lead the packet into the FDB table + * specified in the vlan configuration below + */ + vcfg.num_ifs = 1; + vcfg.if_id[0] = port_priv->idx; + vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID; + err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); + if (err) { + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); + return err; + } + + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; + + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { + err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, + ethsw->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(netdev, + "dpsw_vlan_add_if_untagged err %d\n", err); + return err; + } + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; + } + + if (flags & BRIDGE_VLAN_INFO_PVID) { + err = dpaa2_switch_port_set_pvid(port_priv, vid); + if (err) + return err; + } + + return 0; +} + +static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state) +{ + switch (state) { + case BR_STATE_DISABLED: + return DPSW_STP_STATE_DISABLED; + case BR_STATE_LISTENING: + return DPSW_STP_STATE_LISTENING; + case BR_STATE_LEARNING: + return DPSW_STP_STATE_LEARNING; + case BR_STATE_FORWARDING: + return DPSW_STP_STATE_FORWARDING; + case BR_STATE_BLOCKING: + return DPSW_STP_STATE_BLOCKING; + default: + return DPSW_STP_STATE_DISABLED; + } +} + +static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) +{ + struct dpsw_stp_cfg stp_cfg = {0}; + int err; + u16 vid; + + if (!netif_running(port_priv->netdev) || state == port_priv->stp_state) + return 0; /* Nothing to do */ + + stp_cfg.state = br_stp_state_to_dpsw(state); + for (vid = 0; vid <= VLAN_VID_MASK; vid++) { + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { + stp_cfg.vlan_id = vid; + err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, 
+ port_priv->ethsw_data->dpsw_handle, + port_priv->idx, &stp_cfg); + if (err) { + netdev_err(port_priv->netdev, + "dpsw_if_set_stp err %d\n", err); + return err; + } + } + } + + port_priv->stp_state = state; + + return 0; +} + +static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) +{ + struct ethsw_port_priv *ppriv_local = NULL; + int i, err; + + if (!ethsw->vlans[vid]) + return -ENOENT; + + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); + if (err) { + dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); + return err; + } + ethsw->vlans[vid] = 0; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + ppriv_local = ethsw->ports[i]; + ppriv_local->vlans[vid] = 0; + } + + return 0; +} + +static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_unicast_cfg entry = {0}; + u16 fdb_id; + int err; + + entry.if_egress = port_priv->idx; + entry.type = DPSW_FDB_ENTRY_STATIC; + ether_addr_copy(entry.mac_addr, addr); + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + if (err) + netdev_err(port_priv->netdev, + "dpsw_fdb_add_unicast err %d\n", err); + return err; +} + +static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_unicast_cfg entry = {0}; + u16 fdb_id; + int err; + + entry.if_egress = port_priv->idx; + entry.type = DPSW_FDB_ENTRY_STATIC; + ether_addr_copy(entry.mac_addr, addr); + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + /* Silently discard error for calling multiple times the del command */ + if (err && err != -ENXIO) + netdev_err(port_priv->netdev, + "dpsw_fdb_remove_unicast err %d\n", err); + return err; +} + +static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_multicast_cfg entry = {0}; + u16 fdb_id; + int err; + + ether_addr_copy(entry.mac_addr, addr); + entry.type = DPSW_FDB_ENTRY_STATIC; + entry.num_ifs = 1; + entry.if_id[0] = port_priv->idx; + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + /* Silently discard error for calling multiple times the add command */ + if (err && err != -ENXIO) + netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", + err); + return err; +} + +static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_multicast_cfg entry = {0}; + u16 fdb_id; + int err; + + ether_addr_copy(entry.mac_addr, addr); + entry.type = DPSW_FDB_ENTRY_STATIC; + entry.num_ifs = 1; + entry.if_id[0] = port_priv->idx; + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + /* Silently discard error for calling multiple times the del command */ + if (err && err != -ENAVAIL) + netdev_err(port_priv->netdev, + "dpsw_fdb_remove_multicast err %d\n", err); + return err; +} + +static void dpaa2_switch_port_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u64 tmp; + int err; + + err = 
dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_FRAME, &stats->rx_packets); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_EGR_FRAME, &stats->tx_packets); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_BYTE, &stats->rx_bytes); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_EGR_BYTE, &stats->tx_bytes); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_FRAME_DISCARD, + &stats->rx_dropped); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_FLTR_FRAME, + &tmp); + if (err) + goto error; + stats->rx_dropped += tmp; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_EGR_FRAME_DISCARD, + &stats->tx_dropped); + if (err) + goto error; + + return; + +error: + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); +} + +static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, + int attr_id) +{ + return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); +} + +static int dpaa2_switch_port_get_offload_stats(int attr_id, + const struct net_device *netdev, + void *sp) +{ + switch (attr_id) { + case IFLA_OFFLOAD_XSTATS_CPU_HIT: + dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); + return 0; + } + + return -EINVAL; +} + +static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, + 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + (u16)ETHSW_L2_MAX_FRM(mtu)); + if (err) { + netdev_err(netdev, + "dpsw_if_set_max_frame_length() err %d\n", err); + return err; + } + + netdev->mtu = mtu; + return 0; +} + +static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_link_state state; + int err; + + /* Interrupts are received even though no one issued an 'ifconfig up' + * on the switch interface. Ignore these link state update interrupts + */ + if (!netif_running(netdev)) + return 0; + + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, &state); + if (err) { + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); + return err; + } + + WARN_ONCE(state.up > 1, "Garbage read into link_state"); + + if (state.up != port_priv->link_state) { + if (state.up) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + port_priv->link_state = state.up; + } + + return 0; +} + +/* Manage all NAPI instances for the control interface. + * + * We only have one RX queue and one Tx Conf queue for all + * switch ports. Therefore, we only need to enable the NAPI instance once, the + * first time one of the switch ports runs .dev_open(). 
+ */ + +static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw) +{ + int i; + + /* Access to the ethsw->napi_users relies on the RTNL lock */ + ASSERT_RTNL(); + + /* a new interface is using the NAPI instance */ + ethsw->napi_users++; + + /* if there is already a user of the instance, return */ + if (ethsw->napi_users > 1) + return; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + napi_enable(&ethsw->fq[i].napi); +} + +static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw) +{ + int i; + + /* Access to the ethsw->napi_users relies on the RTNL lock */ + ASSERT_RTNL(); + + /* If we are not the last interface using the NAPI, return */ + ethsw->napi_users--; + if (ethsw->napi_users) + return; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + napi_disable(&ethsw->fq[i].napi); +} + +static int dpaa2_switch_port_open(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + int err; + + /* Explicitly set carrier off, otherwise + * netif_carrier_ok() will return true and cause 'ip link show' + * to report the LOWER_UP flag, even though the link + * notification wasn't even received. + */ + netif_carrier_off(netdev); + + err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx); + if (err) { + netdev_err(netdev, "dpsw_if_enable err %d\n", err); + return err; + } + + /* sync carrier state */ + err = dpaa2_switch_port_carrier_state_sync(netdev); + if (err) { + netdev_err(netdev, + "dpaa2_switch_port_carrier_state_sync err %d\n", err); + goto err_carrier_sync; + } + + dpaa2_switch_enable_ctrl_if_napi(ethsw); + + return 0; + +err_carrier_sync: + dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx); + return err; +} + +static int dpaa2_switch_port_stop(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + int err; + + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx); + if (err) { + netdev_err(netdev, "dpsw_if_disable err %d\n", err); + return err; + } + + dpaa2_switch_disable_ctrl_if_napi(ethsw); + + return 0; +} + +static int dpaa2_switch_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct ethsw_port_priv *port_priv = netdev_priv(dev); + + ppid->id_len = 1; + ppid->id[0] = port_priv->ethsw_data->dev_id; + + return 0; +} + +static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, + size_t len) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = snprintf(name, len, "p%d", port_priv->idx); + if (err >= len) + return -EINVAL; + + return 0; +} + +struct ethsw_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, + struct ethsw_dump_ctx *dump) +{ + int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; +
ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, + struct ethsw_port_priv *port_priv) +{ + int idx = port_priv->idx; + int valid; + + if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) + valid = entry->if_info == port_priv->idx; + else + valid = entry->if_mask[idx / 8] & BIT(idx % 8); + + return valid; +} + +static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv, + dpaa2_switch_fdb_cb_t cb, void *data) +{ + struct net_device *net_dev = port_priv->netdev; + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct device *dev = net_dev->dev.parent; + struct fdb_dump_entry *fdb_entries; + struct fdb_dump_entry fdb_entry; + dma_addr_t fdb_dump_iova; + u16 num_fdb_entries; + u32 fdb_dump_size; + int err = 0, i; + u8 *dma_mem; + u16 fdb_id; + + fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); + dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); + if (!dma_mem) + return -ENOMEM; + + fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, fdb_dump_iova)) { + netdev_err(net_dev, "dma_map_single() failed\n"); + err = -ENOMEM; + goto err_map; + } + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id, + fdb_dump_iova, fdb_dump_size, &num_fdb_entries); + if (err) { + netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); + goto err_dump; + } + + dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); + + fdb_entries = (struct fdb_dump_entry *)dma_mem; + for (i = 0; i < num_fdb_entries; i++) { + fdb_entry = fdb_entries[i]; + + err = cb(port_priv, &fdb_entry, data); + if (err) + goto end; + } + +end: + kfree(dma_mem); + + return 0; + +err_dump: + dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); +err_map: + kfree(dma_mem); + return err; +} + +static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv, + struct fdb_dump_entry *fdb_entry, + void *data) +{ + if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) + return 0; + + return dpaa2_switch_fdb_dump_nl(fdb_entry, data); +} + +static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *net_dev, + struct net_device *filter_dev, int *idx) +{ + struct ethsw_port_priv *port_priv = netdev_priv(net_dev); + struct ethsw_dump_ctx dump = { + .dev = net_dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + int err; + + err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump); + *idx = dump.idx; + + return err; +} + +static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv, + struct fdb_dump_entry *fdb_entry, + void *data __always_unused) +{ + if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) + return 0; + + if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC)) + return 0; + + if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) + dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr); + else + dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr); + + return 0; +} + +static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv) +{ + dpaa2_switch_fdb_iterate(port_priv, + 
dpaa2_switch_fdb_entry_fast_age, NULL); +} + +static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = vid, + .obj.orig_dev = netdev, + /* This API only allows programming tagged, non-PVID VIDs */ + .flags = 0, + }; + + return dpaa2_switch_port_vlans_add(netdev, &vlan); +} + +static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = vid, + .obj.orig_dev = netdev, + /* This API only allows programming tagged, non-PVID VIDs */ + .flags = 0, + }; + + return dpaa2_switch_port_vlans_del(netdev, &vlan); +} + +static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *net_dev = port_priv->netdev; + struct device *dev = net_dev->dev.parent; + u8 mac_addr[ETH_ALEN]; + int err; + + if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) + return 0; + + /* Get firmware address, if any */ + err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, mac_addr); + if (err) { + dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); + return err; + } + + /* First check if firmware has any address configured by bootloader */ + if (!is_zero_ether_addr(mac_addr)) { + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else { + /* No MAC address configured, fill in net_dev->dev_addr + * with a random one + */ + eth_hw_addr_random(net_dev); + dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); + + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all + * practical purposes, this will be our "permanent" mac address, + * at least until the next reboot. This move will also permit + * register_netdevice() to properly fill up net_dev->perm_addr. + */ + net_dev->addr_assign_type = NET_ADDR_PERM; + } + + return 0; +} + +static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw, + const struct dpaa2_fd *fd) +{ + struct device *dev = ethsw->dev; + unsigned char *buffer_start; + struct sk_buff **skbh, *skb; + dma_addr_t fd_addr; + + fd_addr = dpaa2_fd_get_addr(fd); + skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr); + + skb = *skbh; + buffer_start = (unsigned char *)skbh; + + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_TO_DEVICE); + + /* Move on with skb release */ + dev_kfree_skb(skb); +} + +static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = ethsw->dev; + struct sk_buff **skbh; + dma_addr_t addr; + u8 *buff_start; + void *hwa; + + buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET - + DPAA2_SWITCH_TX_BUF_ALIGN, + DPAA2_SWITCH_TX_BUF_ALIGN); + + /* Clear FAS to have consistent values for TX confirmation. 
It is + * located in the first 8 bytes of the buffer's hardware annotation + * area + */ + hwa = buff_start + DPAA2_SWITCH_SWA_SIZE; + memset(hwa, 0, 8); + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it + * on Tx confirm + */ + skbh = (struct sk_buff **)buff_start; + *skbh = skb; + + addr = dma_map_single(dev, buff_start, + skb_tail_pointer(skb) - buff_start, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, addr))) + return -ENOMEM; + + /* Setup the FD fields */ + memset(fd, 0, sizeof(*fd)); + + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start)); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_format(fd, dpaa2_fd_single); + + return 0; +} + +static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(net_dev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES; + struct dpaa2_fd fd; + int err; + + if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) { + struct sk_buff *ns; + + ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM); + if (unlikely(!ns)) { + net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name); + goto err_free_skb; + } + dev_consume_skb_any(skb); + skb = ns; + } + + /* We'll be holding a back-reference to the skb until Tx confirmation */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + /* skb_unshare() has already freed the skb */ + net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name); + goto err_exit; + } + + /* At this stage, we do not support non-linear skbs so just try to + * linearize the skb and if that's not working, just drop the packet. 
+ */ + err = skb_linearize(skb); + if (err) { + net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err); + goto err_free_skb; + } + + err = dpaa2_switch_build_single_fd(ethsw, skb, &fd); + if (unlikely(err)) { + net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err); + goto err_free_skb; + } + + do { + err = dpaa2_io_service_enqueue_qd(NULL, + port_priv->tx_qdid, + 8, 0, &fd); + retries--; + } while (err == -EBUSY && retries); + + if (unlikely(err < 0)) { + dpaa2_switch_free_fd(ethsw, &fd); + goto err_exit; + } + + return NETDEV_TX_OK; + +err_free_skb: + dev_kfree_skb(skb); +err_exit: + return NETDEV_TX_OK; +} + +static int +dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *f) +{ + switch (f->command) { + case FLOW_CLS_REPLACE: + return dpaa2_switch_cls_flower_replace(acl_tbl, f); + case FLOW_CLS_DESTROY: + return dpaa2_switch_cls_flower_destroy(acl_tbl, f); + default: + return -EOPNOTSUPP; + } +} + +static int +dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *f) +{ + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return dpaa2_switch_cls_matchall_replace(acl_tbl, f); + case TC_CLSMATCHALL_DESTROY: + return dpaa2_switch_cls_matchall_destroy(acl_tbl, f); + default: + return -EOPNOTSUPP; + } +} + +static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data); + case TC_SETUP_CLSMATCHALL: + return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(dpaa2_switch_block_cb_list); + +static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl) + return -EINVAL; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); + return err; + } + + acl_tbl->ports |= BIT(port_priv->idx); + port_priv->acl_tbl = acl_tbl; + + return 0; +} + +static int +dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl != acl_tbl) + return -EINVAL; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); + return err; + } + + acl_tbl->ports &= ~BIT(port_priv->idx); + port_priv->acl_tbl = NULL; + return 0; +} + +static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl; + int err; + + /* If the port is already bound to this ACL table then do nothing. 
This + * can happen when this port is the first one to join a tc block + */ + if (port_priv->acl_tbl == acl_tbl) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl); + if (err) + return err; + + /* Mark the previous ACL table as being unused if this was the last + * port that was using it. + */ + if (old_acl_tbl->ports == 0) + old_acl_tbl->in_use = false; + + return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); +} + +static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *new_acl_tbl; + int err; + + /* We are the last port that leaves a block (an ACL table). + * We'll continue to use this table. + */ + if (acl_tbl->ports == BIT(port_priv->idx)) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl); + if (err) + return err; + + if (acl_tbl->ports == 0) + acl_tbl->in_use = false; + + new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + new_acl_tbl->in_use = true; + return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl); +} + +static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + bool register_block = false; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + + if (!block_cb) { + /* If the ACL table is not already known, then this port must + * be the first to join it. In this case, we can just continue + * to use our private table + */ + acl_tbl = port_priv->acl_tbl; + + block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw, acl_tbl, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + register_block = true; + } else { + acl_tbl = flow_block_cb_priv(block_cb); + } + + flow_block_cb_incref(block_cb); + err = dpaa2_switch_port_block_bind(port_priv, acl_tbl); + if (err) + goto err_block_bind; + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, + &dpaa2_switch_block_cb_list); + } + + return 0; + +err_block_bind: + if (!flow_block_cb_decref(block_cb)) + flow_block_cb_free(block_cb); + return err; +} + +static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + if (!block_cb) + return; + + acl_tbl = flow_block_cb_priv(block_cb); + err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl); + if (!err && !flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +} + +static int dpaa2_switch_setup_tc_block(struct net_device *netdev, + struct flow_block_offload *f) +{ + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->driver_block_list = &dpaa2_switch_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + return dpaa2_switch_setup_tc_block_bind(netdev, f); + case FLOW_BLOCK_UNBIND: + dpaa2_switch_setup_tc_block_unbind(netdev, f); + return 0; + default: + return -EOPNOTSUPP; + } 
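+ /* Ports bound to the same shared tc block end up sharing a single + * hardware ACL table: the first port to join donates its private table + * and later ports are re-bound to it in dpaa2_switch_port_block_bind(). + */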
+} + +static int dpaa2_switch_port_setup_tc(struct net_device *netdev, + enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: { + return dpaa2_switch_setup_tc_block(netdev, type_data); + } + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct net_device_ops dpaa2_switch_port_ops = { + .ndo_open = dpaa2_switch_port_open, + .ndo_stop = dpaa2_switch_port_stop, + + .ndo_set_mac_address = eth_mac_addr, + .ndo_get_stats64 = dpaa2_switch_port_get_stats, + .ndo_change_mtu = dpaa2_switch_port_change_mtu, + .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, + .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, + .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, + .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add, + .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill, + + .ndo_start_xmit = dpaa2_switch_port_tx, + .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, + .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, + .ndo_setup_tc = dpaa2_switch_port_setup_tc, +}; + +bool dpaa2_switch_port_dev_check(const struct net_device *netdev) +{ + return netdev->netdev_ops == &dpaa2_switch_port_ops; +} + +static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); + dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); + } +} + +static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) +{ + struct device *dev = (struct device *)arg; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + + /* Mask the events and the if_id reserved bits to be cleared on read */ + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; + int err; + + err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, &status); + if (err) { + dev_err(dev, "Can't get irq status (err %d)\n", err); + + err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); + if (err) + dev_err(dev, "Can't clear irq status (err %d)\n", err); + goto out; + } + + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) + dpaa2_switch_links_state_update(ethsw); + +out: + return IRQ_HANDLED; +} + +static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; + struct fsl_mc_device_irq *irq; + int err; + + err = fsl_mc_allocate_irqs(sw_dev); + if (err) { + dev_err(dev, "MC irqs allocation failed\n"); + return err; + } + + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { + err = -EINVAL; + goto free_irq; + } + + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0); + if (err) { + dev_err(dev, "dpsw_set_irq_enable err %d\n", err); + goto free_irq; + } + + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; + + err = devm_request_threaded_irq(dev, irq->msi_desc->irq, + NULL, + dpaa2_switch_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), dev); + if (err) { + dev_err(dev, "devm_request_threaded_irq(): %d\n", err); + goto free_irq; + } + + err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, mask); + if (err) { + dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); + goto free_devm_irq; + } + + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 1); + if (err) { + dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); + goto free_devm_irq; + } + + 
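+ /* From this point on, DPSW_IRQ_EVENT_LINK_CHANGED events are unmasked + * and delivered to dpaa2_switch_irq0_handler_thread(). + */ +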
return 0; + +free_devm_irq: + devm_free_irq(dev, irq->msi_desc->irq, dev); +free_irq: + fsl_mc_free_irqs(sw_dev); + return err; +} + +static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + int err; + + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0); + if (err) + dev_err(dev, "dpsw_set_irq_enable err %d\n", err); + + fsl_mc_free_irqs(sw_dev); +} + +static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + enum dpsw_learning_mode learn_mode; + int err; + + if (enable) + learn_mode = DPSW_LEARNING_MODE_HW; + else + learn_mode = DPSW_LEARNING_MODE_DIS; + + err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, learn_mode); + if (err) + netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err); + + if (!enable) + dpaa2_switch_port_fast_age(port_priv); + + return err; +} + +static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, + u8 state) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = dpaa2_switch_port_set_stp_state(port_priv, state); + if (err) + return err; + + switch (state) { + case BR_STATE_DISABLED: + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + err = dpaa2_switch_port_set_learning(port_priv, false); + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + err = dpaa2_switch_port_set_learning(port_priv, + port_priv->learn_ena); + break; + } + + return err; +} + +static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, + struct switchdev_brport_flags flags) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + + if (flags.mask & BR_BCAST_FLOOD) + port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD); + + if (flags.mask & BR_FLOOD) + port_priv->ucast_flood = !!(flags.val & BR_FLOOD); + + return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); +} + +static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD | + BR_MCAST_FLOOD)) + return -EINVAL; + + if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) { + bool multicast = !!(flags.val & BR_MCAST_FLOOD); + bool unicast = !!(flags.val & BR_FLOOD); + + if (unicast != multicast) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot configure multicast flooding independently of unicast"); + return -EINVAL; + } + } + + return 0; +} + +static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + if (flags.mask & BR_LEARNING) { + bool learn_ena = !!(flags.val & BR_LEARNING); + + err = dpaa2_switch_port_set_learning(port_priv, learn_ena); + if (err) + return err; + port_priv->learn_ena = learn_ena; + } + + if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) { + err = dpaa2_switch_port_flood(port_priv, flags); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_switch_port_attr_set(struct net_device *netdev, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = dpaa2_switch_port_attr_stp_state_set(netdev, + attr->u.stp_state); + break; + case 
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + if (!attr->u.vlan_filtering) { + NL_SET_ERR_MSG_MOD(extack, + "The DPAA2 switch does not support VLAN-unaware operation"); + return -EOPNOTSUPP; + } + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int dpaa2_switch_port_vlans_add(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpsw_attr *attr = &ethsw->sw_attr; + int err = 0; + + /* Make sure that the VLAN is not already configured + * on the switch port + */ + if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) + return -EEXIST; + + /* Check if there is space for a new VLAN */ + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + &ethsw->sw_attr); + if (err) { + netdev_err(netdev, "dpsw_get_attributes err %d\n", err); + return err; + } + if (attr->max_vlans - attr->num_vlans < 1) + return -ENOSPC; + + if (!port_priv->ethsw_data->vlans[vlan->vid]) { + /* this is a new VLAN */ + err = dpaa2_switch_add_vlan(port_priv, vlan->vid); + if (err) + return err; + + port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; + } + + return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); +} + +static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, + const unsigned char *addr) +{ + struct netdev_hw_addr_list *list = (is_uc) ?
&netdev->uc : &netdev->mc; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + list_for_each_entry(ha, &list->list, list) { + if (ether_addr_equal(ha->addr, addr)) { + netif_addr_unlock_bh(netdev); + return 1; + } + } + netif_addr_unlock_bh(netdev); + return 0; +} + +static int dpaa2_switch_port_mdb_add(struct net_device *netdev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + /* Check if address is already set on this port */ + if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) + return -EEXIST; + + err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); + if (err) + return err; + + err = dev_mc_add(netdev, mdb->addr); + if (err) { + netdev_err(netdev, "dev_mc_add err %d\n", err); + dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); + } + + return err; +} + +static int dpaa2_switch_port_obj_add(struct net_device *netdev, + const struct switchdev_obj *obj) +{ + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = dpaa2_switch_port_vlans_add(netdev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = dpaa2_switch_port_mdb_add(netdev, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_vlan_if_cfg vcfg; + int i, err; + + if (!port_priv->vlans[vid]) + return -ENOENT; + + if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { + /* If we are deleting the PVID of a port, use VLAN 4095 instead + * as we are sure that neither the bridge nor the 8021q module + * will use it + */ + err = dpaa2_switch_port_set_pvid(port_priv, 4095); + if (err) + return err; + } + + vcfg.num_ifs = 1; + vcfg.if_id[0] = port_priv->idx; + if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, + ethsw->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(netdev, + "dpsw_vlan_remove_if_untagged err %d\n", + err); + } + port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; + } + + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(netdev, + "dpsw_vlan_remove_if err %d\n", err); + return err; + } + port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; + + /* Delete VLAN from switch if it is no longer configured on + * any port + */ + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) + return 0; /* Found a port member in VID */ + + ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; + + err = dpaa2_switch_dellink(ethsw, vid); + if (err) + return err; + } + + return 0; +} + +int dpaa2_switch_port_vlans_del(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + + return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); +} + +static int dpaa2_switch_port_mdb_del(struct net_device *netdev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) + return -ENOENT; + + err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); + if (err) + return err; + + err = dev_mc_del(netdev, mdb->addr); 
+ if (err) { + netdev_err(netdev, "dev_mc_del err %d\n", err); + return err; + } + + return err; +} + +static int dpaa2_switch_port_obj_del(struct net_device *netdev, + const struct switchdev_obj *obj) +{ + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + return err; +} + +static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, + struct switchdev_notifier_port_attr_info *ptr) +{ + int err; + + err = switchdev_handle_port_attr_set(netdev, ptr, + dpaa2_switch_port_dev_check, + dpaa2_switch_port_attr_set); + return notifier_from_errno(err); +} + +static int dpaa2_switch_port_bridge_join(struct net_device *netdev, + struct net_device *upper_dev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct ethsw_port_priv *other_port_priv; + struct net_device *other_dev; + struct list_head *iter; + bool learn_ena; + int err; + + netdev_for_each_lower_dev(upper_dev, other_dev, iter) { + if (!dpaa2_switch_port_dev_check(other_dev)) + continue; + + other_port_priv = netdev_priv(other_dev); + if (other_port_priv->ethsw_data != port_priv->ethsw_data) { + netdev_err(netdev, + "Interface from a different DPSW is in the bridge already!\n"); + return -EINVAL; + } + } + + /* Delete the previously manually installed VLAN 1 */ + err = dpaa2_switch_port_del_vlan(port_priv, 1); + if (err) + return err; + + dpaa2_switch_port_set_fdb(port_priv, upper_dev); + + /* Inherit the initial bridge port learning state */ + learn_ena = br_port_flag_is_set(netdev, BR_LEARNING); + err = dpaa2_switch_port_set_learning(port_priv, learn_ena); + port_priv->learn_ena = learn_ena; + + /* Setup the egress flood policy (broadcast, unknown unicast) */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); + if (err) + goto err_egress_flood; + + return 0; + +err_egress_flood: + dpaa2_switch_port_set_fdb(port_priv, NULL); + return err; +} + +static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg) +{ + __be16 vlan_proto = htons(ETH_P_8021Q); + + if (vdev) + vlan_proto = vlan_dev_vlan_proto(vdev); + + return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); +} + +static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) +{ + __be16 vlan_proto = htons(ETH_P_8021Q); + + if (vdev) + vlan_proto = vlan_dev_vlan_proto(vdev); + + return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); +} + +static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; + struct ethsw_core *ethsw = port_priv->ethsw_data; + int err; + + /* First of all, fast age any learn FDB addresses on this switch port */ + dpaa2_switch_port_fast_age(port_priv); + + /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN + * upper devices or otherwise from the FDB table that we are about to + * leave + */ + err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); + if (err) + netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); + + dpaa2_switch_port_set_fdb(port_priv, NULL); + + /* Restore all RX VLANs into the new FDB table that we just joined */ + err = vlan_for_each(netdev, 
dpaa2_switch_port_restore_rxvlan, netdev); + if (err) + netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); + + /* Reset the flooding state to denote that this port can send any + * packet in standalone mode. With this, we are also ensuring that any + * later bridge join will have the flooding flag on. + */ + port_priv->bcast_flood = true; + port_priv->ucast_flood = true; + + /* Setup the egress flood policy (broadcast, unknown unicast). + * When the port is not under a bridge, only the CTRL interface is part + * of the flooding domain besides the actual port + */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); + if (err) + return err; + + /* Recreate the egress flood domain of the FDB that we just left */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); + if (err) + return err; + + /* No HW learning when not under a bridge */ + err = dpaa2_switch_port_set_learning(port_priv, false); + if (err) + return err; + port_priv->learn_ena = false; + + /* Add the VLAN 1 as PVID when not under a bridge. We need this since + * the dpaa2 switch interfaces are not capable to be VLAN unaware + */ + return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, + BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); +} + +static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) +{ + struct net_device *upper_dev; + struct list_head *iter; + + /* RCU read lock not necessary because we have write-side protection + * (rtnl_mutex), however a non-rcu iterator does not exist. + */ + netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter) + if (is_vlan_dev(upper_dev)) + return -EOPNOTSUPP; + + return 0; +} + +static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + int err = 0; + + if (!dpaa2_switch_port_dev_check(netdev)) + return NOTIFY_DONE; + + extack = netdev_notifier_info_to_extack(&info->info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!netif_is_bridge_master(upper_dev)) + break; + + if (!br_vlan_enabled(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); + err = -EOPNOTSUPP; + goto out; + } + + err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot join a bridge while VLAN uppers are present"); + goto out; + } + + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (netif_is_bridge_master(upper_dev)) { + if (info->linking) + err = dpaa2_switch_port_bridge_join(netdev, upper_dev); + else + err = dpaa2_switch_port_bridge_leave(netdev); + } + break; + } + +out: + return notifier_from_errno(err); +} + +struct ethsw_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + unsigned long event; +}; + +static void dpaa2_switch_event_work(struct work_struct *work) +{ + struct ethsw_switchdev_event_work *switchdev_work = + container_of(work, struct ethsw_switchdev_event_work, work); + struct net_device *dev = switchdev_work->dev; + struct switchdev_notifier_fdb_info *fdb_info; + int err; + + rtnl_lock(); + fdb_info = &switchdev_work->fdb_info; + + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (!fdb_info->added_by_user || fdb_info->is_local) 
+ break; + if (is_unicast_ether_addr(fdb_info->addr)) + err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), + fdb_info->addr); + else + err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), + fdb_info->addr); + if (err) + break; + fdb_info->offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, + &fdb_info->info, NULL); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user || fdb_info->is_local) + break; + if (is_unicast_ether_addr(fdb_info->addr)) + dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); + else + dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); + break; + } + + rtnl_unlock(); + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(dev); +} + +/* Called under rcu_read_lock() */ +static int dpaa2_switch_port_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct ethsw_port_priv *port_priv = netdev_priv(dev); + struct ethsw_switchdev_event_work *switchdev_work; + struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct ethsw_core *ethsw = port_priv->ethsw_data; + + if (event == SWITCHDEV_PORT_ATTR_SET) + return dpaa2_switch_port_attr_set_event(dev, ptr); + + if (!dpaa2_switch_port_dev_check(dev)) + return NOTIFY_DONE; + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); + switchdev_work->dev = dev; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + + /* Take a reference on the device to avoid being freed. 
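+ * The reference is released at the end of dpaa2_switch_event_work(), + * once the deferred work has run.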
*/ + dev_hold(dev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + queue_work(ethsw->workqueue, &switchdev_work->work); + + return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static int dpaa2_switch_port_obj_event(unsigned long event, + struct net_device *netdev, + struct switchdev_notifier_port_obj_info *port_obj_info) +{ + int err = -EOPNOTSUPP; + + if (!dpaa2_switch_port_dev_check(netdev)) + return NOTIFY_DONE; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); + break; + case SWITCHDEV_PORT_OBJ_DEL: + err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); + break; + } + + port_obj_info->handled = true; + return notifier_from_errno(err); +} + +static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + case SWITCHDEV_PORT_OBJ_DEL: + return dpaa2_switch_port_obj_event(event, dev, ptr); + case SWITCHDEV_PORT_ATTR_SET: + return dpaa2_switch_port_attr_set_event(dev, ptr); + } + + return NOTIFY_DONE; +} + +/* Build a linear skb based on a single-buffer frame descriptor */ +static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, + const struct dpaa2_fd *fd) +{ + u16 fd_offset = dpaa2_fd_get_offset(fd); + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + struct device *dev = ethsw->dev; + struct sk_buff *skb = NULL; + void *fd_vaddr; + + fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); + dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + if (unlikely(!skb)) { + dev_err(dev, "build_skb() failed\n"); + return NULL; + } + + skb_reserve(skb, fd_offset); + skb_put(skb, fd_length); + + ethsw->buf_count--; + + return skb; +} + +static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, + const struct dpaa2_fd *fd) +{ + dpaa2_switch_free_fd(fq->ethsw, fd); +} + +static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, + const struct dpaa2_fd *fd) +{ + struct ethsw_core *ethsw = fq->ethsw; + struct ethsw_port_priv *port_priv; + struct net_device *netdev; + struct vlan_ethhdr *hdr; + struct sk_buff *skb; + u16 vlan_tci, vid; + int if_id, err; + + /* get switch ingress interface ID */ + if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; + + if (if_id >= ethsw->sw_attr.num_ifs) { + dev_err(ethsw->dev, "Frame received from unknown interface!\n"); + goto err_free_fd; + } + port_priv = ethsw->ports[if_id]; + netdev = port_priv->netdev; + + /* build the SKB based on the FD received */ + if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { + if (net_ratelimit()) { + netdev_err(netdev, "Received invalid frame format\n"); + goto err_free_fd; + } + } + + skb = dpaa2_switch_build_linear_skb(ethsw, fd); + if (unlikely(!skb)) + goto err_free_fd; + + skb_reset_mac_header(skb); + + /* Remove the VLAN header if the packet that we just received has a vid + * equal to the port PVIDs. Since the dpaa2-switch can operate only in + * VLAN-aware mode and no alterations are made on the packet when it's + * redirected/mirrored to the control interface, we are sure that there + * will always be a VLAN header present. 
+ */ + hdr = vlan_eth_hdr(skb); + vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; + if (vid == port_priv->pvid) { + err = __skb_vlan_pop(skb, &vlan_tci); + if (err) { + dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); + goto err_free_fd; + } + } + + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + + /* Setup the offload_fwd_mark only if the port is under a bridge */ + skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); + + netif_receive_skb(skb); + + return; + +err_free_fd: + dpaa2_switch_free_fd(ethsw, fd); +} + +static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) +{ + ethsw->features = 0; + + if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) + ethsw->features |= ETHSW_FEATURE_MAC_ADDR; +} + +static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) +{ + struct dpsw_ctrl_if_attr ctrl_if_attr; + struct device *dev = ethsw->dev; + int i = 0; + int err; + + err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + &ctrl_if_attr); + if (err) { + dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); + return err; + } + + ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; + ethsw->fq[i].ethsw = ethsw; + ethsw->fq[i++].type = DPSW_QUEUE_RX; + + ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; + ethsw->fq[i].ethsw = ethsw; + ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; + + return 0; +} + +/* Free buffers acquired from the buffer pool or which were meant to + * be released in the pool + */ +static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) +{ + struct device *dev = ethsw->dev; + void *vaddr; + int i; + + for (i = 0; i < count; i++) { + vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); + dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + free_pages((unsigned long)vaddr, 0); + } +} + +/* Perform a single release command to add buffers + * to the specified buffer pool + */ +static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) +{ + struct device *dev = ethsw->dev; + u64 buf_array[BUFS_PER_CMD]; + struct page *page; + int retries = 0; + dma_addr_t addr; + int err; + int i; + + for (i = 0; i < BUFS_PER_CMD; i++) { + /* Allocate one page for each Rx buffer. WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) { + dev_err(dev, "buffer allocation failed\n"); + goto err_alloc; + } + + addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, addr)) { + dev_err(dev, "dma_map_single() failed\n"); + goto err_map; + } + buf_array[i] = addr; + } + +release_bufs: + /* In case the portal is busy, retry until successful or + * max retries hit. + */ + while ((err = dpaa2_io_service_release(NULL, bpid, + buf_array, i)) == -EBUSY) { + if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) + break; + + cpu_relax(); + } + + /* If release command failed, clean up and bail out. 
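+ * Returning 0 here tells dpaa2_switch_refill_bp() and + * dpaa2_switch_seed_bp() that no buffers were added.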
*/ + if (err) { + dpaa2_switch_free_bufs(ethsw, buf_array, i); + return 0; + } + + return i; + +err_map: + __free_pages(page, 0); +err_alloc: + /* If we managed to allocate at least some buffers, + * release them to hardware + */ + if (i) + goto release_bufs; + + return 0; +} + +static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) +{ + int *count = &ethsw->buf_count; + int new_count; + int err = 0; + + if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { + do { + new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); + if (unlikely(!new_count)) { + /* Out of memory; abort for now, we'll + * try later on + */ + break; + } + *count += new_count; + } while (*count < DPAA2_ETHSW_NUM_BUFS); + + if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) + err = -ENOMEM; + } + + return err; +} + +static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) +{ + int *count, i; + + for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { + count = &ethsw->buf_count; + *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); + + if (unlikely(*count < BUFS_PER_CMD)) + return -ENOMEM; + } + + return 0; +} + +static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) +{ + u64 buf_array[BUFS_PER_CMD]; + int ret; + + do { + ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, + buf_array, BUFS_PER_CMD); + if (ret < 0) { + dev_err(ethsw->dev, + "dpaa2_io_service_acquire() = %d\n", ret); + return; + } + dpaa2_switch_free_bufs(ethsw, buf_array, ret); + + } while (ret); +} + +static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) +{ + struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; + struct device *dev = ethsw->dev; + struct fsl_mc_device *dpbp_dev; + struct dpbp_attr dpbp_attrs; + int err; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, + &dpbp_dev); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "DPBP device allocation failed\n"); + return err; + } + ethsw->dpbp_dev = dpbp_dev; + + err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, + &dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_open() failed\n"); + goto err_open; + } + + err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_reset() failed\n"); + goto err_reset; + } + + err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); + goto err_enable; + } + + err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, + &dpbp_attrs); + if (err) { + dev_err(dev, "dpbp_get_attributes() failed\n"); + goto err_get_attr; + } + + dpsw_ctrl_if_pools_cfg.num_dpbp = 1; + dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; + dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; + dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; + + err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, + &dpsw_ctrl_if_pools_cfg); + if (err) { + dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); + goto err_get_attr; + } + ethsw->bpid = dpbp_attrs.id; + + return 0; + +err_get_attr: + dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); +err_enable: +err_reset: + dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); +err_open: + fsl_mc_object_free(dpbp_dev); + return err; +} + +static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) +{ + dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); + dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); + fsl_mc_object_free(ethsw->dpbp_dev); +} + +static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < 
DPAA2_SWITCH_RX_NUM_FQS; i++) { + ethsw->fq[i].store = + dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, + ethsw->dev); + if (!ethsw->fq[i].store) { + dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); + while (--i >= 0) + dpaa2_io_store_destroy(ethsw->fq[i].store); + return -ENOMEM; + } + } + + return 0; +} + +static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + dpaa2_io_store_destroy(ethsw->fq[i].store); +} + +static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) +{ + int err, retries = 0; + + /* Try to pull from the FQ while the portal is busy and we didn't hit + * the maximum number of retries + */ + do { + err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); + cpu_relax(); + } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); + + if (unlikely(err)) + dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); + + return err; +} + +/* Consume all frames pull-dequeued into the store */ +static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) +{ + struct ethsw_core *ethsw = fq->ethsw; + int cleaned = 0, is_last; + struct dpaa2_dq *dq; + int retries = 0; + + do { + /* Get the next available FD from the store */ + dq = dpaa2_io_store_next(fq->store, &is_last); + if (unlikely(!dq)) { + if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { + dev_err_once(ethsw->dev, + "No valid dequeue response\n"); + return -ETIMEDOUT; + } + continue; + } + + if (fq->type == DPSW_QUEUE_RX) + dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); + else + dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); + cleaned++; + + } while (!is_last); + + return cleaned; +} + +/* NAPI poll routine */ +static int dpaa2_switch_poll(struct napi_struct *napi, int budget) +{ + int err, cleaned = 0, store_cleaned, work_done; + struct dpaa2_switch_fq *fq; + int retries = 0; + + fq = container_of(napi, struct dpaa2_switch_fq, napi); + + do { + err = dpaa2_switch_pull_fq(fq); + if (unlikely(err)) + break; + + /* Refill pool if appropriate */ + dpaa2_switch_refill_bp(fq->ethsw); + + store_cleaned = dpaa2_switch_store_consume(fq); + cleaned += store_cleaned; + + if (cleaned >= budget) { + work_done = budget; + goto out; + } + + } while (store_cleaned); + + /* We didn't consume the entire budget, so finish napi and re-enable + * data availability notifications + */ + napi_complete_done(napi, cleaned); + do { + err = dpaa2_io_service_rearm(NULL, &fq->nctx); + cpu_relax(); + } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); + + work_done = max(cleaned, 1); +out: + + return work_done; +} + +static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) +{ + struct dpaa2_switch_fq *fq; + + fq = container_of(nctx, struct dpaa2_switch_fq, nctx); + + napi_schedule(&fq->napi); +} + +static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) +{ + struct dpsw_ctrl_if_queue_cfg queue_cfg; + struct dpaa2_io_notification_ctx *nctx; + int err, i, j; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { + nctx = &ethsw->fq[i].nctx; + + /* Register a new software context for the FQID. 
+ * By using NULL as the first parameter, we specify that we do + * not care on which cpu interrupts are received for this queue + */ + nctx->is_cdan = 0; + nctx->id = ethsw->fq[i].fqid; + nctx->desired_cpu = DPAA2_IO_ANY_CPU; + nctx->cb = dpaa2_switch_fqdan_cb; + err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); + if (err) { + err = -EPROBE_DEFER; + goto err_register; + } + + queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | + DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; + queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; + queue_cfg.dest_cfg.dest_id = nctx->dpio_id; + queue_cfg.dest_cfg.priority = 0; + queue_cfg.user_ctx = nctx->qman64; + + err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, + ethsw->dpsw_handle, + ethsw->fq[i].type, + &queue_cfg); + if (err) + goto err_set_queue; + } + + return 0; + +err_set_queue: + dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); +err_register: + for (j = 0; j < i; j++) + dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx, + ethsw->dev); + + return err; +} + +static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx, + ethsw->dev); +} + +static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) +{ + int err; + + /* setup FQs for Rx and Tx Conf */ + err = dpaa2_switch_setup_fqs(ethsw); + if (err) + return err; + + /* setup the buffer pool needed on the Rx path */ + err = dpaa2_switch_setup_dpbp(ethsw); + if (err) + return err; + + err = dpaa2_switch_seed_bp(ethsw); + if (err) + goto err_free_dpbp; + + err = dpaa2_switch_alloc_rings(ethsw); + if (err) + goto err_drain_dpbp; + + err = dpaa2_switch_setup_dpio(ethsw); + if (err) + goto err_destroy_rings; + + err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) { + dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); + goto err_deregister_dpio; + } + + return 0; + +err_deregister_dpio: + dpaa2_switch_free_dpio(ethsw); +err_destroy_rings: + dpaa2_switch_destroy_rings(ethsw); +err_drain_dpbp: + dpaa2_switch_drain_bp(ethsw); +err_free_dpbp: + dpaa2_switch_free_dpbp(ethsw); + + return err; +} + +static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + struct dpsw_vlan_if_cfg vcfg = {0}; + struct dpsw_tci_cfg tci_cfg = {0}; + struct dpsw_stp_cfg stp_cfg; + int err; + u16 i; + + ethsw->dev_id = sw_dev->obj_desc.id; + + err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle); + if (err) { + dev_err(dev, "dpsw_open err %d\n", err); + return err; + } + + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + &ethsw->sw_attr); + if (err) { + dev_err(dev, "dpsw_get_attributes err %d\n", err); + goto err_close; + } + + err = dpsw_get_api_version(ethsw->mc_io, 0, + &ethsw->major, + &ethsw->minor); + if (err) { + dev_err(dev, "dpsw_get_api_version err %d\n", err); + goto err_close; + } + + /* Minimum supported DPSW version check */ + if (ethsw->major < DPSW_MIN_VER_MAJOR || + (ethsw->major == DPSW_MIN_VER_MAJOR && + ethsw->minor < DPSW_MIN_VER_MINOR)) { + dev_err(dev, "DPSW version %d:%d not supported. 
Use firmware 10.28.0 or greater.\n", + ethsw->major, ethsw->minor); + err = -EOPNOTSUPP; + goto err_close; + } + + if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { + err = -EOPNOTSUPP; + goto err_close; + } + + dpaa2_switch_detect_features(ethsw); + + err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) { + dev_err(dev, "dpsw_reset err %d\n", err); + goto err_close; + } + + stp_cfg.vlan_id = DEFAULT_VLAN_ID; + stp_cfg.state = DPSW_STP_STATE_FORWARDING; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); + if (err) { + dev_err(dev, "dpsw_if_disable err %d\n", err); + goto err_close; + } + + err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, + &stp_cfg); + if (err) { + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", + err, i); + goto err_close; + } + + /* Switch starts with all ports configured to VLAN 1. Need to + * remove this setting to allow configuration at bridge join + */ + vcfg.num_ifs = 1; + vcfg.if_id[0] = i; + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, + DEFAULT_VLAN_ID, &vcfg); + if (err) { + dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", + err); + goto err_close; + } + + tci_cfg.vlan_id = 4095; + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); + if (err) { + dev_err(dev, "dpsw_if_set_tci err %d\n", err); + goto err_close; + } + + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + DEFAULT_VLAN_ID, &vcfg); + if (err) { + dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); + goto err_close; + } + } + + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); + if (err) { + dev_err(dev, "dpsw_vlan_remove err %d\n", err); + goto err_close; + } + + ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", + WQ_MEM_RECLAIM, "ethsw", + ethsw->sw_attr.id); + if (!ethsw->workqueue) { + err = -ENOMEM; + goto err_close; + } + + err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); + if (err) + goto err_destroy_ordered_workqueue; + + err = dpaa2_switch_ctrl_if_setup(ethsw); + if (err) + goto err_destroy_ordered_workqueue; + + return 0; + +err_destroy_ordered_workqueue: + destroy_workqueue(ethsw->workqueue); + +err_close: + dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); + return err; +} + +/* Add an ACL to redirect frames with specific destination MAC address to + * control interface + */ +static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, + const char *mac) +{ + struct dpaa2_switch_acl_entry acl_entry = {0}; + + /* Match on the destination MAC address */ + ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); + eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); + + /* Trap to CPU */ + acl_entry.cfg.precedence = 0; + acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; + + return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry); +} + +static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) +{ + const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = DEFAULT_VLAN_ID, + .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, + }; + struct net_device *netdev = port_priv->netdev; + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpsw_fdb_cfg fdb_cfg = {0}; + struct dpsw_if_attr dpsw_if_attr; + struct dpaa2_switch_fdb *fdb; + struct dpsw_acl_cfg acl_cfg; + u16 fdb_id, 
acl_tbl_id; + int err; + + /* Get the Tx queue for this specific port */ + err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, &dpsw_if_attr); + if (err) { + netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); + return err; + } + port_priv->tx_qdid = dpsw_if_attr.qdid; + + /* Create a FDB table for this particular switch port */ + fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; + err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, + &fdb_id, &fdb_cfg); + if (err) { + netdev_err(netdev, "dpsw_fdb_add err %d\n", err); + return err; + } + + /* Find an unused dpaa2_switch_fdb structure and use it */ + fdb = dpaa2_switch_fdb_get_unused(ethsw); + fdb->fdb_id = fdb_id; + fdb->in_use = true; + fdb->bridge_dev = NULL; + port_priv->fdb = fdb; + + /* We need to add VLAN 1 as the PVID on this port until it is under a + * bridge since the DPAA2 switch is not able to handle the traffic in a + * VLAN unaware fashion + */ + err = dpaa2_switch_port_vlans_add(netdev, &vlan); + if (err) + return err; + + /* Setup the egress flooding domains (broadcast, unknown unicast */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); + if (err) + return err; + + /* Create an ACL table to be used by this switch port */ + acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; + err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, + &acl_tbl_id, &acl_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add err %d\n", err); + return err; + } + + acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + acl_tbl->ethsw = ethsw; + acl_tbl->id = acl_tbl_id; + acl_tbl->in_use = true; + acl_tbl->num_rules = 0; + INIT_LIST_HEAD(&acl_tbl->entries); + + err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + if (err) + return err; + + err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); + if (err) + return err; + + return err; +} + +static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + int err; + + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) + dev_warn(dev, "dpsw_close err %d\n", err); +} + +static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) +{ + dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); + dpaa2_switch_free_dpio(ethsw); + dpaa2_switch_destroy_rings(ethsw); + dpaa2_switch_drain_bp(ethsw); + dpaa2_switch_free_dpbp(ethsw); +} + +static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) +{ + struct ethsw_port_priv *port_priv; + struct ethsw_core *ethsw; + struct device *dev; + int i; + + dev = &sw_dev->dev; + ethsw = dev_get_drvdata(dev); + + dpaa2_switch_ctrl_if_teardown(ethsw); + + dpaa2_switch_teardown_irqs(sw_dev); + + dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + port_priv = ethsw->ports[i]; + unregister_netdev(port_priv->netdev); + free_netdev(port_priv->netdev); + } + + kfree(ethsw->fdbs); + kfree(ethsw->acls); + kfree(ethsw->ports); + + dpaa2_switch_takedown(sw_dev); + + destroy_workqueue(ethsw->workqueue); + + fsl_mc_portal_free(ethsw->mc_io); + + kfree(ethsw); + + dev_set_drvdata(dev, NULL); + + return 0; +} + +static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, + u16 port_idx) +{ + struct ethsw_port_priv *port_priv; + struct device *dev = ethsw->dev; + struct net_device *port_netdev; + int err; + + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); + if (!port_netdev) { + dev_err(dev, 
"alloc_etherdev error\n"); + return -ENOMEM; + } + + port_priv = netdev_priv(port_netdev); + port_priv->netdev = port_netdev; + port_priv->ethsw_data = ethsw; + + port_priv->idx = port_idx; + port_priv->stp_state = BR_STATE_FORWARDING; + + SET_NETDEV_DEV(port_netdev, dev); + port_netdev->netdev_ops = &dpaa2_switch_port_ops; + port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; + + port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; + + port_priv->bcast_flood = true; + port_priv->ucast_flood = true; + + /* Set MTU limits */ + port_netdev->min_mtu = ETH_MIN_MTU; + port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; + + /* Populate the private port structure so that later calls to + * dpaa2_switch_port_init() can use it. + */ + ethsw->ports[port_idx] = port_priv; + + /* The DPAA2 switch's ingress path depends on the VLAN table, + * thus we are not able to disable VLAN filtering. + */ + port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER | + NETIF_F_HW_TC; + + err = dpaa2_switch_port_init(port_priv, port_idx); + if (err) + goto err_port_probe; + + err = dpaa2_switch_port_set_mac_addr(port_priv); + if (err) + goto err_port_probe; + + err = dpaa2_switch_port_set_learning(port_priv, false); + if (err) + goto err_port_probe; + port_priv->learn_ena = false; + + return 0; + +err_port_probe: + free_netdev(port_netdev); + ethsw->ports[port_idx] = NULL; + + return err; +} + +static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw; + int i, err; + + /* Allocate switch core*/ + ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); + + if (!ethsw) + return -ENOMEM; + + ethsw->dev = dev; + ethsw->iommu_domain = iommu_get_domain_for_dev(dev); + dev_set_drvdata(dev, ethsw); + + err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + ðsw->mc_io); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + goto err_free_drvdata; + } + + err = dpaa2_switch_init(sw_dev); + if (err) + goto err_free_cmdport; + + ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), + GFP_KERNEL); + if (!(ethsw->ports)) { + err = -ENOMEM; + goto err_takedown; + } + + ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), + GFP_KERNEL); + if (!ethsw->fdbs) { + err = -ENOMEM; + goto err_free_ports; + } + + ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls), + GFP_KERNEL); + if (!ethsw->acls) { + err = -ENOMEM; + goto err_free_fdbs; + } + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + err = dpaa2_switch_probe_port(ethsw, i); + if (err) + goto err_free_netdev; + } + + /* Add a NAPI instance for each of the Rx queues. The first port's + * net_device will be associated with the instances since we do not have + * different queues for each switch ports. 
+ */ + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + netif_napi_add(ethsw->ports[0]->netdev, + &ethsw->fq[i].napi, dpaa2_switch_poll, + NAPI_POLL_WEIGHT); + + err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) { + dev_err(ethsw->dev, "dpsw_enable err %d\n", err); + goto err_free_netdev; + } + + /* Setup IRQs */ + err = dpaa2_switch_setup_irqs(sw_dev); + if (err) + goto err_stop; + + /* Register the netdev only when the entire setup is done and the + * switch port interfaces are ready to receive traffic + */ + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + err = register_netdev(ethsw->ports[i]->netdev); + if (err < 0) { + dev_err(dev, "register_netdev error %d\n", err); + goto err_unregister_ports; + } + } + + return 0; + +err_unregister_ports: + for (i--; i >= 0; i--) + unregister_netdev(ethsw->ports[i]->netdev); + dpaa2_switch_teardown_irqs(sw_dev); +err_stop: + dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); +err_free_netdev: + for (i--; i >= 0; i--) + free_netdev(ethsw->ports[i]->netdev); + kfree(ethsw->acls); +err_free_fdbs: + kfree(ethsw->fdbs); +err_free_ports: + kfree(ethsw->ports); + +err_takedown: + dpaa2_switch_takedown(sw_dev); + +err_free_cmdport: + fsl_mc_portal_free(ethsw->mc_io); + +err_free_drvdata: + kfree(ethsw); + dev_set_drvdata(dev, NULL); + + return err; +} + +static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpsw", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); + +static struct fsl_mc_driver dpaa2_switch_drv = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_switch_probe, + .remove = dpaa2_switch_remove, + .match_id_table = dpaa2_switch_match_id_table +}; + +static struct notifier_block dpaa2_switch_port_nb __read_mostly = { + .notifier_call = dpaa2_switch_port_netdevice_event, +}; + +static struct notifier_block dpaa2_switch_port_switchdev_nb = { + .notifier_call = dpaa2_switch_port_event, +}; + +static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { + .notifier_call = dpaa2_switch_port_blocking_event, +}; + +static int dpaa2_switch_register_notifiers(void) +{ + int err; + + err = register_netdevice_notifier(&dpaa2_switch_port_nb); + if (err) { + pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); + return err; + } + + err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); + if (err) { + pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); + goto err_switchdev_nb; + } + + err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); + if (err) { + pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); + goto err_switchdev_blocking_nb; + } + + return 0; + +err_switchdev_blocking_nb: + unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); +err_switchdev_nb: + unregister_netdevice_notifier(&dpaa2_switch_port_nb); + + return err; +} + +static void dpaa2_switch_unregister_notifiers(void) +{ + int err; + + err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); + if (err) + pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", + err); + + err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); + if (err) + pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); + + err = unregister_netdevice_notifier(&dpaa2_switch_port_nb); + if (err) + pr_err("dpaa2-switch: failed to 
unregister net_device notifier (%d)\n", err); +} + +static int __init dpaa2_switch_driver_init(void) +{ + int err; + + err = fsl_mc_driver_register(&dpaa2_switch_drv); + if (err) + return err; + + err = dpaa2_switch_register_notifiers(); + if (err) { + fsl_mc_driver_unregister(&dpaa2_switch_drv); + return err; + } + + return 0; +} + +static void __exit dpaa2_switch_driver_exit(void) +{ + dpaa2_switch_unregister_notifiers(); + fsl_mc_driver_unregister(&dpaa2_switch_drv); +} + +module_init(dpaa2_switch_driver_init); +module_exit(dpaa2_switch_driver_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h new file mode 100644 index 000000000000..bdef71f234cb --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DPAA2 Ethernet Switch declarations + * + * Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2017-2021 NXP + * + */ + +#ifndef __ETHSW_H +#define __ETHSW_H + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <uapi/linux/if_bridge.h> +#include <net/switchdev.h> +#include <linux/if_bridge.h> +#include <linux/fsl/mc.h> +#include <net/pkt_cls.h> +#include <soc/fsl/dpaa2-io.h> + +#include "dpsw.h" + +/* Number of IRQs supported */ +#define DPSW_IRQ_NUM 2 + +/* Port is member of VLAN */ +#define ETHSW_VLAN_MEMBER 1 +/* VLAN to be treated as untagged on egress */ +#define ETHSW_VLAN_UNTAGGED 2 +/* Untagged frames will be assigned to this VLAN */ +#define ETHSW_VLAN_PVID 4 +/* VLAN configured on the switch */ +#define ETHSW_VLAN_GLOBAL 8 + +/* Maximum Frame Length supported by HW (currently 10k) */ +#define DPAA2_MFL (10 * 1024) +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN) + +#define ETHSW_FEATURE_MAC_ADDR BIT(0) + +/* Number of receive queues (one RX and one TX_CONF) */ +#define DPAA2_SWITCH_RX_NUM_FQS 2 + +/* Hardware requires alignment for ingress/egress buffer addresses */ +#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE +#define DPAA2_SWITCH_RX_BUF_TAILROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define DPAA2_SWITCH_RX_BUF_SIZE \ + (DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM) + +#define DPAA2_SWITCH_STORE_SIZE 16 + +/* Buffer management */ +#define BUFS_PER_CMD 7 +#define DPAA2_ETHSW_NUM_BUFS (1024 * BUFS_PER_CMD) +#define DPAA2_ETHSW_REFILL_THRESH (DPAA2_ETHSW_NUM_BUFS * 5 / 6) + +/* Number of times to retry DPIO portal operations while waiting + * for portal to finish executing current command and become + * available. 
We want to avoid being stuck in a while loop in case + * hardware becomes unresponsive, but not give up too easily if + * the portal really is busy for valid reasons + */ +#define DPAA2_SWITCH_SWP_BUSY_RETRIES 1000 + +/* Hardware annotation buffer size */ +#define DPAA2_SWITCH_HWA_SIZE 64 +/* Software annotation buffer size */ +#define DPAA2_SWITCH_SWA_SIZE 64 + +#define DPAA2_SWITCH_TX_BUF_ALIGN 64 + +#define DPAA2_SWITCH_TX_DATA_OFFSET \ + (DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE) + +#define DPAA2_SWITCH_NEEDED_HEADROOM \ + (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN) + +#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16 +#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1 + +#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256 + +extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops; + +struct ethsw_core; + +struct dpaa2_switch_fq { + struct ethsw_core *ethsw; + enum dpsw_queue_type type; + struct dpaa2_io_store *store; + struct dpaa2_io_notification_ctx nctx; + struct napi_struct napi; + u32 fqid; +}; + +struct dpaa2_switch_fdb { + struct net_device *bridge_dev; + u16 fdb_id; + bool in_use; +}; + +struct dpaa2_switch_acl_entry { + struct list_head list; + u16 prio; + unsigned long cookie; + + struct dpsw_acl_entry_cfg cfg; + struct dpsw_acl_key key; +}; + +struct dpaa2_switch_acl_tbl { + struct list_head entries; + struct ethsw_core *ethsw; + u64 ports; + + u16 id; + u8 num_rules; + bool in_use; +}; + +static inline bool +dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl) +{ + if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= + DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) + return true; + return false; +} + +/* Per port private data */ +struct ethsw_port_priv { + struct net_device *netdev; + u16 idx; + struct ethsw_core *ethsw_data; + u8 link_state; + u8 stp_state; + + u8 vlans[VLAN_VID_MASK + 1]; + u16 pvid; + u16 tx_qdid; + + struct dpaa2_switch_fdb *fdb; + bool bcast_flood; + bool ucast_flood; + bool learn_ena; + + struct dpaa2_switch_acl_tbl *acl_tbl; +}; + +/* Switch data */ +struct ethsw_core { + struct device *dev; + struct fsl_mc_io *mc_io; + u16 dpsw_handle; + struct dpsw_attr sw_attr; + u16 major, minor; + unsigned long features; + int dev_id; + struct ethsw_port_priv **ports; + struct iommu_domain *iommu_domain; + + u8 vlans[VLAN_VID_MASK + 1]; + + struct workqueue_struct *workqueue; + + struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS]; + struct fsl_mc_device *dpbp_dev; + int buf_count; + u16 bpid; + int napi_users; + + struct dpaa2_switch_fdb *fdbs; + struct dpaa2_switch_acl_tbl *acls; +}; + +static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw, + struct net_device *netdev) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (ethsw->ports[i]->netdev == netdev) + return ethsw->ports[i]->idx; + + return -EINVAL; +} + +static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) +{ + if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) { + dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n"); + return false; + } + + if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) { + dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n"); + return false; + } + + if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) { + dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n"); + return false; + } + + if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) { + dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n"); + return false; + } + + 
return true; +} + +bool dpaa2_switch_port_dev_check(const struct net_device *netdev); + +int dpaa2_switch_port_vlans_add(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan); + +int dpaa2_switch_port_vlans_del(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan); + +typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv, + struct fdb_dump_entry *fdb_entry, + void *data); + +/* TC offload */ + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls); + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls); + +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry); +#endif /* __ETHSW_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h index 6de613b13e4d..6f596a5fbeeb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpkg.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h @@ -13,11 +13,12 @@ /** Key Generator properties */ /** - * Number of masks per key extraction + * DPKG_NUM_OF_MASKS - Number of masks per key extraction */ #define DPKG_NUM_OF_MASKS 4 + /** - * Number of extractions per key profile + * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile */ #define DPKG_MAX_NUM_OF_EXTRACTS 10 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h index 135f143097a5..8f7ceb731282 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h @@ -83,39 +83,21 @@ int dpmac_get_attributes(struct fsl_mc_io *mc_io, u16 token, struct dpmac_attr *attr); -/** - * DPMAC link configuration/state options - */ +/* DPMAC link configuration/state options */ -/** - * Enable auto-negotiation - */ #define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0) -/** - * Enable half-duplex mode - */ #define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1) -/** - * Enable pause frames - */ #define DPMAC_LINK_OPT_PAUSE BIT_ULL(2) -/** - * Enable a-symmetric pause frames - */ #define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3) -/** - * Advertised link speeds - */ +/* Advertised link speeds */ #define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0) #define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1) #define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2) #define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4) #define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5) -/** - * Advertise auto-negotiation enable - */ +/* Advertise auto-negotiation enable */ #define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3) /** diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index aa429c17c343..d6afada99fb6 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -17,6 +17,8 @@ * This function has to be called before the following functions: * - dpni_set_rx_tc_dist() * - dpni_set_qos_table() + * + * Return: '0' on Success; Error code otherwise. */ int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf) { @@ -1793,6 +1795,8 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io, * If cfg.enable is set to 0 the command will clear flow steering table. 
* The packets will be classified according to settings made in * dpni_set_rx_hash_dist() + * + * Return: '0' on Success; Error code otherwise. */ int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, u32 cmd_flags, @@ -1826,6 +1830,8 @@ int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, * If cfg.enable is set to 1 the packets will be classified using a hash * function based on the key received in cfg.key_cfg_iova parameter. * If cfg.enable is set to 0 the packets will be sent to the default queue + * + * Return: '0' on Success; Error code otherwise. */ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, u32 cmd_flags, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index 4e96d9362dd2..7de0562bbf59 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -10,73 +10,76 @@ struct fsl_mc_io; -/** - * Data Path Network Interface API +/* Data Path Network Interface API * Contains initialization APIs and runtime control APIs for DPNI */ /** General DPNI macros */ /** - * Maximum number of traffic classes + * DPNI_MAX_TC - Maximum number of traffic classes */ #define DPNI_MAX_TC 8 /** - * Maximum number of buffer pools per DPNI + * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI */ #define DPNI_MAX_DPBP 8 /** - * All traffic classes considered; see dpni_set_queue() + * DPNI_ALL_TCS - All traffic classes considered; see dpni_set_queue() */ #define DPNI_ALL_TCS (u8)(-1) /** - * All flows within traffic class considered; see dpni_set_queue() + * DPNI_ALL_TC_FLOWS - All flows within traffic class considered; see + * dpni_set_queue() */ #define DPNI_ALL_TC_FLOWS (u16)(-1) /** - * Generate new flow ID; see dpni_set_queue() + * DPNI_NEW_FLOW_ID - Generate new flow ID; see dpni_set_queue() */ #define DPNI_NEW_FLOW_ID (u16)(-1) /** - * Tx traffic is always released to a buffer pool on transmit, there are no - * resources allocated to have the frames confirmed back to the source after - * transmission. + * DPNI_OPT_TX_FRM_RELEASE - Tx traffic is always released to a buffer pool on + * transmit, there are no resources allocated to have the frames confirmed back + * to the source after transmission. */ #define DPNI_OPT_TX_FRM_RELEASE 0x000001 /** - * Disables support for MAC address filtering for addresses other than primary - * MAC address. This affects both unicast and multicast. Promiscuous mode can - * still be enabled/disabled for both unicast and multicast. If promiscuous mode - * is disabled, only traffic matching the primary MAC address will be accepted. + * DPNI_OPT_NO_MAC_FILTER - Disables support for MAC address filtering for + * addresses other than primary MAC address. This affects both unicast and + * multicast. Promiscuous mode can still be enabled/disabled for both unicast + * and multicast. If promiscuous mode is disabled, only traffic matching the + * primary MAC address will be accepted. */ #define DPNI_OPT_NO_MAC_FILTER 0x000002 /** - * Allocate policers for this DPNI. They can be used to rate-limit traffic per - * traffic class (TC) basis. + * DPNI_OPT_HAS_POLICING - Allocate policers for this DPNI. They can be used to + * rate-limit traffic per traffic class (TC) basis. */ #define DPNI_OPT_HAS_POLICING 0x000004 /** - * Congestion can be managed in several ways, allowing the buffer pool to - * deplete on ingress, taildrop on each queue or use congestion groups for sets - * of queues. If set, it configures a single congestion groups across all TCs. 
- * If reset, a congestion group is allocated for each TC. Only relevant if the - * DPNI has multiple traffic classes. + * DPNI_OPT_SHARED_CONGESTION - Congestion can be managed in several ways, + * allowing the buffer pool to deplete on ingress, taildrop on each queue or + * use congestion groups for sets of queues. If set, it configures a single + * congestion groups across all TCs. If reset, a congestion group is allocated + * for each TC. Only relevant if the DPNI has multiple traffic classes. */ #define DPNI_OPT_SHARED_CONGESTION 0x000008 /** - * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all - * look-ups are exact match. Note that TCAM is not available on LS1088 and its - * variants. Setting this bit on these SoCs will trigger an error. + * DPNI_OPT_HAS_KEY_MASKING - Enables TCAM for Flow Steering and QoS look-ups. + * If not specified, all look-ups are exact match. Note that TCAM is not + * available on LS1088 and its variants. Setting this bit on these SoCs will + * trigger an error. */ #define DPNI_OPT_HAS_KEY_MASKING 0x000010 /** - * Disables the flow steering table. + * DPNI_OPT_NO_FS - Disables the flow steering table. */ #define DPNI_OPT_NO_FS 0x000020 /** - * Flow steering table is shared between all traffic classes + * DPNI_OPT_SHARED_FS - Flow steering table is shared between all traffic + * classes */ #define DPNI_OPT_SHARED_FS 0x001000 @@ -129,20 +132,14 @@ int dpni_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); -/** - * DPNI IRQ Index and Events - */ +/* DPNI IRQ Index and Events */ -/** - * IRQ index - */ #define DPNI_IRQ_INDEX 0 -/** - * IRQ events: - * indicates a change in link state - * indicates a change in endpoint - */ + +/* DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state */ #define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +/* DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint */ #define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002 int dpni_set_irq_enable(struct fsl_mc_io *mc_io, @@ -222,32 +219,30 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io, u16 token, struct dpni_attr *attr); -/** - * DPNI errors - */ +/* DPNI errors */ /** - * Extract out of frame header error + * DPNI_ERROR_EOFHE - Extract out of frame header error */ #define DPNI_ERROR_EOFHE 0x00020000 /** - * Frame length error + * DPNI_ERROR_FLE - Frame length error */ #define DPNI_ERROR_FLE 0x00002000 /** - * Frame physical error + * DPNI_ERROR_FPE - Frame physical error */ #define DPNI_ERROR_FPE 0x00001000 /** - * Parsing header error + * DPNI_ERROR_PHE - Parsing header error */ #define DPNI_ERROR_PHE 0x00000020 /** - * Parser L3 checksum error + * DPNI_ERROR_L3CE - Parser L3 checksum error */ #define DPNI_ERROR_L3CE 0x00000004 /** - * Parser L3 checksum error + * DPNI_ERROR_L4CE - Parser L3 checksum error */ #define DPNI_ERROR_L4CE 0x00000001 @@ -281,36 +276,35 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, u16 token, struct dpni_error_cfg *cfg); -/** - * DPNI buffer layout modification options - */ +/* DPNI buffer layout modification options */ /** - * Select to modify the time-stamp setting + * DPNI_BUF_LAYOUT_OPT_TIMESTAMP - Select to modify the time-stamp setting */ #define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 /** - * Select to modify the parser-result setting; not applicable for Tx + * DPNI_BUF_LAYOUT_OPT_PARSER_RESULT - Select to modify the parser-result + * setting; not applicable for Tx */ #define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 /** - * Select to modify the frame-status setting + * 
DPNI_BUF_LAYOUT_OPT_FRAME_STATUS - Select to modify the frame-status setting */ #define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 /** - * Select to modify the private-data-size setting + * DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE - Select to modify the private-data-size setting */ #define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 /** - * Select to modify the data-alignment setting + * DPNI_BUF_LAYOUT_OPT_DATA_ALIGN - Select to modify the data-alignment setting */ #define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 /** - * Select to modify the data-head-room setting + * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM - Select to modify the data-head-room setting */ #define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 /** - * Select to modify the data-tail-room setting + * DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM - Select to modify the data-tail-room setting */ #define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 @@ -343,7 +337,8 @@ struct dpni_buffer_layout { * @DPNI_QUEUE_TX: Tx queue * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue * @DPNI_QUEUE_RX_ERR: Rx error queue - */enum dpni_queue_type { + */ +enum dpni_queue_type { DPNI_QUEUE_RX, DPNI_QUEUE_TX, DPNI_QUEUE_TX_CONFIRM, @@ -424,7 +419,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, * lack of buffers * @page_2.egress_discarded_frames: Egress discarded frame count * @page_2.egress_confirmed_frames: Egress confirmed frame count - * @page3: Page_3 statistics structure + * @page_3: Page_3 statistics structure * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes * dequeued from egress FQs * @page_3.egress_dequeue_frames: Cumulative count of the number of frames @@ -501,30 +496,14 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io, u8 page, union dpni_statistics *stat); -/** - * Enable auto-negotiation - */ #define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -/** - * Enable half-duplex mode - */ #define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -/** - * Enable pause frames - */ #define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -/** - * Enable a-symmetric pause frames - */ #define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL - -/** - * Enable priority flow control pause frames - */ #define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL /** - * struct - Structure representing DPNI link configuration + * struct dpni_link_cfg - Structure representing DPNI link configuration * @rate: Rate * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values */ @@ -687,8 +666,8 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, const struct dpni_rx_tc_dist_cfg *cfg); /** - * When used for fs_miss_flow_id in function dpni_set_rx_dist, - * will signal to dpni to drop all unclassified frames + * DPNI_FS_MISS_DROP - When used for fs_miss_flow_id in function + * dpni_set_rx_dist, will signal to dpni to drop all unclassified frames */ #define DPNI_FS_MISS_DROP ((uint16_t)-1) @@ -766,7 +745,7 @@ enum dpni_dest { /** * struct dpni_queue - Queue structure - * @destination - Destination structure + * @destination: - Destination structure * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0. * Identifies either a DPIO or a DPCON object. * Not relevant for Tx queues. 
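[Editor's note, not part of the patch] The dpni.h hunks above only rework kernel-doc for existing definitions such as the DPNI_LINK_OPT_* flags and struct dpni_link_cfg; the values themselves are unchanged. As a minimal sketch of how those flags are typically combined by a caller, assuming the usual dpni_set_link_cfg() prototype and an illustrative rate value (neither is taken from this diff):

/* Illustrative only: fill struct dpni_link_cfg as documented above and
 * issue the MC command. Error handling beyond the return code is omitted.
 */
static int example_configure_link(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_link_cfg cfg = {
		.rate	 = 1000,	/* Mbps, illustrative value */
		.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE,
	};

	/* cmd_flags is 0: no special MC command flags requested */
	return dpni_set_link_cfg(mc_io, 0, token, &cfg);
}
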
@@ -837,9 +816,7 @@ struct dpni_queue_id { u16 qdbin; }; -/** - * Set User Context - */ +/* Set User Context */ #define DPNI_QUEUE_OPT_USER_CTX 0x00000001 #define DPNI_QUEUE_OPT_DEST 0x00000002 #define DPNI_QUEUE_OPT_FLC 0x00000004 @@ -904,9 +881,9 @@ struct dpni_dest_cfg { /* DPNI congestion options */ /** - * This congestion will trigger flow control or priority flow control. - * This will have effect only if flow control is enabled with - * dpni_set_link_cfg(). + * DPNI_CONG_OPT_FLOW_CONTROL - This congestion will trigger flow control or + * priority flow control. This will have effect only if flow control is + * enabled with dpni_set_link_cfg(). */ #define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 @@ -990,23 +967,24 @@ struct dpni_rule_cfg { }; /** - * Discard matching traffic. If set, this takes precedence over any other - * configuration and matching traffic is always discarded. + * DPNI_FS_OPT_DISCARD - Discard matching traffic. If set, this takes + * precedence over any other configuration and matching traffic is always + * discarded. */ #define DPNI_FS_OPT_DISCARD 0x1 /** - * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to - * override the FLC value set per queue. + * DPNI_FS_OPT_SET_FLC - Set FLC value. If set, flc member of struct + * dpni_fs_action_cfg is used to override the FLC value set per queue. * For more details check the Frame Descriptor section in the hardware * documentation. */ #define DPNI_FS_OPT_SET_FLC 0x2 /** - * Indicates whether the 6 lowest significant bits of FLC are used for stash - * control. If set, the 6 least significant bits in value are interpreted as - * follows: + * DPNI_FS_OPT_SET_STASH_CONTROL - Indicates whether the 6 lowest significant + * bits of FLC are used for stash control. If set, the 6 least significant bits + * in value are interpreted as follows: * - bits 0-1: indicates the number of 64 byte units of context that are * stashed. FLC value is interpreted as a memory address in this case, * excluding the 6 LS bits. @@ -1068,7 +1046,7 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io, u16 *major_ver, u16 *minor_ver); /** - * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration + * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration * @rate_limit: Rate in Mbps * @max_burst_size: Burst size in bytes (up to 64KB) */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h index 05c413719e55..01d77c685a5b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h @@ -13,9 +13,6 @@ struct fsl_mc_io; -/** - * Number of irq's - */ #define DPRTC_MAX_IRQ_NUM 1 #define DPRTC_IRQ_INDEX 0 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h index 450841cc6ca8..cb13e740f72b 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h @@ -1,16 +1,18 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright 2017-2020 NXP + * Copyright 2017-2021 NXP * */ #ifndef __FSL_DPSW_CMD_H #define __FSL_DPSW_CMD_H +#include "dpsw.h" + /* DPSW Version */ #define DPSW_VER_MAJOR 8 -#define DPSW_VER_MINOR 5 +#define DPSW_VER_MINOR 9 #define DPSW_CMD_BASE_VERSION 1 #define DPSW_CMD_VERSION_2 2 @@ -27,7 +29,7 @@ #define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002) #define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003) -#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004) +#define DPSW_CMDID_GET_ATTR DPSW_CMD_V2(0x004) #define DPSW_CMDID_RESET DPSW_CMD_ID(0x005) #define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012) @@ -45,18 +47,18 @@ #define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D) #define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E) +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042) + #define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044) #define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046) -#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047) -#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048) #define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A) #define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C) #define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060) -#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061) +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_V2(0x061) #define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062) #define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064) @@ -64,16 +66,31 @@ #define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066) #define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067) +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082) +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083) #define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084) #define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085) #define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086) #define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087) -#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088) #define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A) +#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090) +#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091) +#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092) +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093) +#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094) +#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095) + #define DPSW_CMDID_IF_GET_PORT_MAC_ADDR DPSW_CMD_ID(0x0A7) -#define DPSW_CMDID_IF_GET_PRIMARY_MAC_ADDR DPSW_CMD_ID(0x0A8) -#define DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR DPSW_CMD_ID(0x0A9) + +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0) +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1) +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2) +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3) +#define DPSW_CMDID_CTRL_IF_SET_QUEUE DPSW_CMD_ID(0x0A6) + +#define DPSW_CMDID_SET_EGRESS_FLOOD DPSW_CMD_ID(0x0AC) +#define DPSW_CMDID_IF_SET_LEARNING_MODE DPSW_CMD_ID(0x0AD) /* Macros for accessing command fields smaller than 1byte */ #define DPSW_MASK(field) \ @@ -169,6 +186,12 @@ struct dpsw_cmd_clear_irq_status { #define DPSW_COMPONENT_TYPE_SHIFT 0 #define DPSW_COMPONENT_TYPE_SIZE 4 +#define DPSW_FLOODING_CFG_SHIFT 0 +#define DPSW_FLOODING_CFG_SIZE 4 + +#define DPSW_BROADCAST_CFG_SHIFT 4 +#define DPSW_BROADCAST_CFG_SIZE 4 + struct dpsw_rsp_get_attr { /* cmd word 0 */ __le16 num_ifs; @@ -186,23 +209,15 @@ struct dpsw_rsp_get_attr { u8 max_meters_per_if; /* from LSB only the first 4 bits */ u8 component_type; - __le16 pad; + /* [0:3] - flooding configuration + * [4:7] - broadcast configuration + */ + u8 repl_cfg; + u8 pad; /* cmd word 3 */ __le64 options; }; -struct dpsw_cmd_if_set_flooding { - __le16 
if_id; - /* from LSB: enable:1 */ - u8 enable; -}; - -struct dpsw_cmd_if_set_broadcast { - __le16 if_id; - /* from LSB: enable:1 */ - u8 enable; -}; - #define DPSW_VLAN_ID_SHIFT 0 #define DPSW_VLAN_ID_SIZE 12 #define DPSW_DEI_SHIFT 12 @@ -255,6 +270,28 @@ struct dpsw_cmd_if { __le16 if_id; }; +#define DPSW_ADMIT_UNTAGGED_SHIFT 0 +#define DPSW_ADMIT_UNTAGGED_SIZE 4 +#define DPSW_ENABLED_SHIFT 5 +#define DPSW_ENABLED_SIZE 1 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1 + +struct dpsw_rsp_if_get_attr { + /* cmd word 0 */ + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */ + u8 conf; + u8 pad1; + u8 num_tcs; + u8 pad2; + __le16 qdid; + /* cmd word 1 */ + __le32 options; + __le32 pad3; + /* cmd word 2 */ + __le32 rate; +}; + struct dpsw_cmd_if_set_max_frame_length { __le16 if_id; __le16 frame_length; @@ -295,13 +332,23 @@ struct dpsw_vlan_add { __le16 vlan_id; }; +struct dpsw_cmd_vlan_add_if { + /* cmd word 0 */ + __le16 options; + __le16 vlan_id; + __le16 fdb_id; + __le16 pad0; + /* cmd word 1-4 */ + __le64 if_id; +}; + struct dpsw_cmd_vlan_manage_if { /* cmd word 0 */ __le16 pad0; __le16 vlan_id; __le32 pad1; /* cmd word 1-4 */ - __le64 if_id[4]; + __le64 if_id; }; struct dpsw_cmd_vlan_remove { @@ -311,7 +358,7 @@ struct dpsw_cmd_vlan_remove { struct dpsw_cmd_fdb_add { __le32 pad; - __le16 fdb_aging_time; + __le16 fdb_ageing_time; __le16 num_fdb_entries; }; @@ -347,16 +394,7 @@ struct dpsw_cmd_fdb_multicast_op { u8 mac_addr[6]; __le16 pad2; /* cmd word 2-5 */ - __le64 if_id[4]; -}; - -#define DPSW_LEARNING_MODE_SHIFT 0 -#define DPSW_LEARNING_MODE_SIZE 4 - -struct dpsw_cmd_fdb_set_learning_mode { - __le16 fdb_id; - /* only the first 4 bits from LSB */ - u8 mode; + __le64 if_id; }; struct dpsw_cmd_fdb_dump { @@ -371,6 +409,36 @@ struct dpsw_rsp_fdb_dump { __le16 num_entries; }; +struct dpsw_rsp_ctrl_if_get_attr { + __le64 pad; + __le32 rx_fqid; + __le32 rx_err_fqid; + __le32 tx_err_conf_fqid; +}; + +#define DPSW_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) +struct dpsw_cmd_ctrl_if_set_pools { + u8 num_dpbp; + u8 backup_pool_mask; + __le16 pad; + __le32 dpbp_id[DPSW_MAX_DPBP]; + __le16 buffer_size[DPSW_MAX_DPBP]; +}; + +#define DPSW_DEST_TYPE_SHIFT 0 +#define DPSW_DEST_TYPE_SIZE 4 + +struct dpsw_cmd_ctrl_if_set_queue { + __le32 dest_id; + u8 dest_priority; + u8 pad; + /* from LSB: dest_type:4 */ + u8 dest_type; + u8 qtype; + __le64 user_ctx; + __le32 options; +}; + struct dpsw_rsp_get_api_version { __le16 version_major; __le16 version_minor; @@ -381,10 +449,89 @@ struct dpsw_rsp_if_get_mac_addr { u8 mac_addr[6]; }; -struct dpsw_cmd_if_set_mac_addr { +struct dpsw_cmd_set_egress_flood { + __le16 fdb_id; + u8 flood_type; + u8 pad[5]; + __le64 if_id; +}; + +#define DPSW_LEARNING_MODE_SHIFT 0 +#define DPSW_LEARNING_MODE_SIZE 4 + +struct dpsw_cmd_if_set_learning_mode { __le16 if_id; - u8 mac_addr[6]; + /* only the first 4 bits from LSB */ + u8 mode; +}; + +struct dpsw_cmd_acl_add { + __le16 pad; + __le16 max_entries; +}; + +struct dpsw_rsp_acl_add { + __le16 acl_id; +}; + +struct dpsw_cmd_acl_remove { + __le16 acl_id; +}; + +struct dpsw_cmd_acl_if { + __le16 acl_id; + __le16 num_ifs; + __le32 pad; + __le64 if_id; +}; + +struct dpsw_prep_acl_entry { + u8 match_l2_dest_mac[6]; + __le16 match_l2_tpid; + + u8 match_l2_source_mac[6]; + __le16 match_l2_vlan_id; + + __le32 match_l3_dest_ip; + __le32 match_l3_source_ip; + + __le16 match_l4_dest_port; + __le16 match_l4_source_port; + __le16 match_l2_ether_type; + u8 match_l2_pcp_dei; + u8 match_l3_dscp; + + u8 
mask_l2_dest_mac[6]; + __le16 mask_l2_tpid; + + u8 mask_l2_source_mac[6]; + __le16 mask_l2_vlan_id; + + __le32 mask_l3_dest_ip; + __le32 mask_l3_source_ip; + + __le16 mask_l4_dest_port; + __le16 mask_l4_source_port; + __le16 mask_l2_ether_type; + u8 mask_l2_pcp_dei; + u8 mask_l3_dscp; + + u8 match_l3_protocol; + u8 mask_l3_protocol; }; +#define DPSW_RESULT_ACTION_SHIFT 0 +#define DPSW_RESULT_ACTION_SIZE 4 + +struct dpsw_cmd_acl_entry { + __le16 acl_id; + __le16 result_if_id; + __le32 precedence; + /* from LSB only the first 4 bits */ + u8 result_action; + u8 pad[7]; + __le64 pad2[4]; + __le64 key_iova; +}; #pragma pack(pop) #endif /* __FSL_DPSW_CMD_H */ diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c index f8bfe779bd30..6352d6d1ecba 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2017-2018 NXP + * Copyright 2017-2021 NXP * */ @@ -9,9 +9,7 @@ #include "dpsw.h" #include "dpsw-cmd.h" -static void build_if_id_bitmap(__le64 *bmap, - const u16 *id, - const u16 num_ifs) +static void build_if_id_bitmap(__le64 *bmap, const u16 *id, const u16 num_ifs) { int i; @@ -38,10 +36,7 @@ static void build_if_id_bitmap(__le64 *bmap, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_open(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int dpsw_id, - u16 *token) +int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_open *cmd_params; @@ -76,9 +71,7 @@ int dpsw_open(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -99,9 +92,7 @@ int dpsw_close(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -122,9 +113,7 @@ int dpsw_enable(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -145,9 +134,7 @@ int dpsw_disable(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_reset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -175,11 +162,8 @@ int dpsw_reset(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en) +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u8 en) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_set_irq_enable *cmd_params; @@ -212,11 +196,8 @@ int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. 
*/ -int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask) +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 mask) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_set_irq_mask *cmd_params; @@ -245,11 +226,8 @@ int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status) +int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 *status) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_get_irq_status *cmd_params; @@ -288,11 +266,8 @@ int dpsw_get_irq_status(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status) +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 status) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_clear_irq_status *cmd_params; @@ -318,9 +293,7 @@ int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, +int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpsw_attr *attr) { struct fsl_mc_command cmd = { 0 }; @@ -351,9 +324,9 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io, attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups); attr->max_meters_per_if = rsp_params->max_meters_per_if; attr->options = le64_to_cpu(rsp_params->options); - attr->component_type = dpsw_get_field(rsp_params->component_type, - COMPONENT_TYPE); - + attr->component_type = dpsw_get_field(rsp_params->component_type, COMPONENT_TYPE); + attr->flooding_cfg = dpsw_get_field(rsp_params->repl_cfg, FLOODING_CFG); + attr->broadcast_cfg = dpsw_get_field(rsp_params->repl_cfg, BROADCAST_CFG); return 0; } @@ -367,10 +340,7 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, struct dpsw_link_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -397,13 +367,10 @@ int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, * @if_id: Interface id * @state: Link state 1 - linkup, 0 - link down or disconnected * - * @Return '0' on Success; Error code otherwise. + * Return: '0' on Success; Error code otherwise. */ -int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_link_state *state) +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_link_state *state) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if_get_link_state *cmd_params; @@ -432,68 +399,6 @@ int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, } /** - * dpsw_if_set_flooding() - Enable Disable flooding for particular interface - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPSW object - * @if_id: Interface Identifier - * @en: 1 - enable, 0 - disable - * - * Return: Completion status. '0' on Success; Error code otherwise. 
- */ -int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en) -{ - struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_if_set_flooding *cmd_params; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, - cmd_flags, - token); - cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params; - cmd_params->if_id = cpu_to_le16(if_id); - dpsw_set_field(cmd_params->enable, ENABLE, en); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -} - -/** - * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPSW object - * @if_id: Interface Identifier - * @en: 1 - enable, 0 - disable - * - * Return: Completion status. '0' on Success; Error code otherwise. - */ -int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en) -{ - struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_if_set_broadcast *cmd_params; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, - cmd_flags, - token); - cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params; - cmd_params->if_id = cpu_to_le16(if_id); - dpsw_set_field(cmd_params->enable, ENABLE, en); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -} - -/** * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -503,10 +408,7 @@ int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_set_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, const struct dpsw_tci_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -538,10 +440,7 @@ int dpsw_if_set_tci(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_get_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, struct dpsw_tci_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -583,10 +482,7 @@ int dpsw_if_get_tci(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_set_stp(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, const struct dpsw_stp_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -616,12 +512,8 @@ int dpsw_if_set_stp(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_get_counter(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - enum dpsw_counter type, - u64 *counter) +int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_counter type, u64 *counter) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if_get_counter *cmd_params; @@ -657,10 +549,7 @@ int dpsw_if_get_counter(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_if_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id) +int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if *cmd_params; @@ -685,10 +574,7 @@ int dpsw_if_enable(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id) +int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if *cmd_params; @@ -705,6 +591,47 @@ int dpsw_if_disable(struct fsl_mc_io *mc_io, } /** + * dpsw_if_get_attributes() - Function obtains attributes of interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @attr: Returned interface attributes + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_if_attr *attr) +{ + struct dpsw_rsp_if_get_attr *rsp_params; + struct fsl_mc_command cmd = { 0 }; + struct dpsw_cmd_if *cmd_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params; + attr->num_tcs = rsp_params->num_tcs; + attr->rate = le32_to_cpu(rsp_params->rate); + attr->options = le32_to_cpu(rsp_params->options); + attr->qdid = le16_to_cpu(rsp_params->qdid); + attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED); + attr->accept_all_vlan = dpsw_get_field(rsp_params->conf, + ACCEPT_ALL_VLAN); + attr->admit_untagged = dpsw_get_field(rsp_params->conf, + ADMIT_UNTAGGED); + + return 0; +} + +/** * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -714,11 +641,8 @@ int dpsw_if_disable(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u16 frame_length) +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, u16 frame_length) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if_set_max_frame_length *cmd_params; @@ -752,11 +676,8 @@ int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_add(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_cfg *cfg) +int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_vlan_add *cmd_params; @@ -788,22 +709,21 @@ int dpsw_vlan_add(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { + struct dpsw_cmd_vlan_add_if *cmd_params; struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_vlan_manage_if *cmd_params; /* prepare command */ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF, cmd_flags, token); - cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; + cmd_params = (struct dpsw_cmd_vlan_add_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + cmd_params->options = cpu_to_le16(cfg->options); + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -826,11 +746,8 @@ int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_vlan_manage_if *cmd_params; @@ -841,7 +758,7 @@ int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -860,11 +777,8 @@ int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_vlan_manage_if *cmd_params; @@ -875,7 +789,7 @@ int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -896,11 +810,8 @@ int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_vlan_manage_if *cmd_params; @@ -911,7 +822,7 @@ int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -926,9 +837,7 @@ int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_remove(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, +int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 vlan_id) { struct fsl_mc_command cmd = { 0 }; @@ -946,6 +855,66 @@ int dpsw_vlan_remove(struct fsl_mc_io *mc_io, } /** + * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for + * the reference + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @fdb_id: Returned Forwarding Database Identifier + * @cfg: FDB Configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id, + const struct dpsw_fdb_cfg *cfg) +{ + struct dpsw_cmd_fdb_add *cmd_params; + struct dpsw_rsp_fdb_add *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params; + cmd_params->fdb_ageing_time = cpu_to_le16(cfg->fdb_ageing_time); + cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params; + *fdb_id = le16_to_cpu(rsp_params->fdb_id); + + return 0; +} + +/** + * dpsw_fdb_remove() - Remove FDB from switch + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @fdb_id: Forwarding Database Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id) +{ + struct dpsw_cmd_fdb_remove *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params; + cmd_params->fdb_id = cpu_to_le16(fdb_id); + + return mc_send_command(mc_io, &cmd); +} + +/** * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -955,11 +924,8 @@ int dpsw_vlan_remove(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg) +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_unicast_op *cmd_params; @@ -998,13 +964,8 @@ int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, * The struct fdb_dump_entry array must be parsed until the end of memory * area or until an entry with mac_addr set to zero is found. */ -int dpsw_fdb_dump(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - u64 iova_addr, - u32 iova_size, - u16 *num_entries) +int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id, + u64 iova_addr, u32 iova_size, u16 *num_entries) { struct dpsw_cmd_fdb_dump *cmd_params; struct dpsw_rsp_fdb_dump *rsp_params; @@ -1041,11 +1002,8 @@ int dpsw_fdb_dump(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg) +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_unicast_op *cmd_params; @@ -1083,11 +1041,8 @@ int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg) +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_multicast_op *cmd_params; @@ -1101,7 +1056,7 @@ int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, cmd_params->fdb_id = cpu_to_le16(fdb_id); cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); for (i = 0; i < 6; i++) cmd_params->mac_addr[i] = cfg->mac_addr[5 - i]; @@ -1125,11 +1080,8 @@ int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg) +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_multicast_op *cmd_params; @@ -1143,7 +1095,7 @@ int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, cmd_params->fdb_id = cpu_to_le16(fdb_id); cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); for (i = 0; i < 6; i++) cmd_params->mac_addr[i] = cfg->mac_addr[5 - i]; @@ -1152,33 +1104,97 @@ int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, } /** - * dpsw_fdb_set_learning_mode() - Define FDB learning mode + * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object - * @fdb_id: Forwarding Database Identifier - * @mode: Learning mode + * @attr: Returned control interface attributes * - * Return: Completion status. '0' on Success; Error code otherwise. + * Return: '0' on Success; Error code otherwise. */ -int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - enum dpsw_fdb_learning_mode mode) +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpsw_ctrl_if_attr *attr) { + struct dpsw_rsp_ctrl_if_get_attr *rsp_params; struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_fdb_set_learning_mode *cmd_params; + int err; - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE, + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR, + cmd_flags, token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params; + attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid); + attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid); + attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid); + + return 0; +} + +/** + * dpsw_ctrl_if_set_pools() - Set control interface buffer pools + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @cfg: Buffer pools configuration + * + * Return: '0' on Success; Error code otherwise. 
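The attribute query above is typically the first step in bringing up the control interface, along the lines of this sketch (the pr_info() consumer is only illustrative):

static int example_ctrl_if_attr(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_ctrl_if_attr attr;
	int err;

	err = dpsw_ctrl_if_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;

	/* the returned FQIDs are what gets wired to DPIO Rx/Tx-conf queues */
	pr_info("ctrl_if: rx_fqid=%u rx_err_fqid=%u tx_err_conf_fqid=%u\n",
		attr.rx_fqid, attr.rx_err_fqid, attr.tx_err_conf_fqid);

	return 0;
}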
+ */ +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_ctrl_if_pools_cfg *cfg) +{ + struct dpsw_cmd_ctrl_if_set_pools *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int i; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS, + cmd_flags, token); + cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < DPSW_MAX_DPBP; i++) { + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_ctrl_if_set_queue() - Set Rx queue configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of dpsw object + * @qtype: dpsw_queue_type of the targeted queue + * @cfg: Rx queue configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + enum dpsw_queue_type qtype, + const struct dpsw_ctrl_if_queue_cfg *cfg) +{ + struct dpsw_cmd_ctrl_if_set_queue *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_QUEUE, cmd_flags, token); - cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params; - cmd_params->fdb_id = cpu_to_le16(fdb_id); - dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode); + cmd_params = (struct dpsw_cmd_ctrl_if_set_queue *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->qtype = qtype; + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); + cmd_params->options = cpu_to_le32(cfg->options); + dpsw_set_field(cmd_params->dest_type, + DEST_TYPE, + cfg->dest_cfg.dest_type); - /* send command to mc*/ return mc_send_command(mc_io, &cmd); } @@ -1191,10 +1207,8 @@ int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_get_api_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 *major_ver, - u16 *minor_ver) +int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 *major_ver, u16 *minor_ver) { struct fsl_mc_command cmd = { 0 }; struct dpsw_rsp_get_api_version *rsp_params; @@ -1216,7 +1230,7 @@ int dpsw_get_api_version(struct fsl_mc_io *mc_io, } /** - * dpsw_if_get_port_mac_addr() + * dpsw_if_get_port_mac_addr() - Retrieve MAC address associated to the physical port * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object @@ -1254,68 +1268,313 @@ int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, } /** - * dpsw_if_get_primary_mac_addr() + * dpsw_ctrl_if_enable() - Enable control interface * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object - * @if_id: Interface Identifier - * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * Return: '0' on Success; Error code otherwise. 
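Buffer pools and the Rx queue destination are normally programmed together before the control interface is enabled; a rough sketch, where dpbp_id and dpio_id stand in for objects obtained elsewhere:

static int example_ctrl_if_setup(struct fsl_mc_io *mc_io, u16 token,
				 int dpbp_id, int dpio_id)
{
	struct dpsw_ctrl_if_pools_cfg pools_cfg = { 0 };
	struct dpsw_ctrl_if_queue_cfg queue_cfg = { 0 };
	int err;

	pools_cfg.num_dpbp = 1;
	pools_cfg.pools[0].dpbp_id = dpbp_id;
	pools_cfg.pools[0].buffer_size = 2048;
	pools_cfg.pools[0].backup_pool = 0;
	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools_cfg);
	if (err)
		return err;

	queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST;
	queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
	queue_cfg.dest_cfg.dest_id = dpio_id;
	queue_cfg.dest_cfg.priority = 0;
	return dpsw_ctrl_if_set_queue(mc_io, 0, token, DPSW_QUEUE_RX, &queue_cfg);
}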
+ */ +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_ctrl_if_disable() - Function disables control interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE, + cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_set_egress_flood() - Set egress parameters associated with an FDB ID + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @cfg: Egress flooding configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_egress_flood_cfg *cfg) +{ + struct dpsw_cmd_set_egress_flood *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_EGRESS_FLOOD, cmd_flags, token); + cmd_params = (struct dpsw_cmd_set_egress_flood *)cmd.params; + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id); + cmd_params->flood_type = cfg->flood_type; + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_learning_mode() - Configure the learning mode on an interface. + * If this API is used, it will take precedence over the FDB configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: InterfaceID + * @mode: Learning mode * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_get_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]) +int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_learning_mode mode) { - struct dpsw_rsp_if_get_mac_addr *rsp_params; + struct dpsw_cmd_if_set_learning_mode *cmd_params; struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_if *cmd_params; - int err, i; - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR, + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LEARNING_MODE, cmd_flags, token); - cmd_params = (struct dpsw_cmd_if *)cmd.params; + cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params; cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_acl_add() - Create an ACL table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: Returned ACL ID, for future references + * @cfg: ACL configuration + * + * Create Access Control List table. Multiple ACLs can be created and + * co-exist in L2 switch + * + * Return: '0' on Success; Error code otherwise. 
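As a hedged illustration of the new flooding and per-interface learning controls above (the interface list and port numbers are placeholders):

static int example_flood_and_learning(struct fsl_mc_io *mc_io, u16 token,
				      u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg = {
		.fdb_id = fdb_id,
		.flood_type = DPSW_BROADCAST,
		.num_ifs = 2,
		.if_id = { 0, 1 },
	};
	int err;

	err = dpsw_set_egress_flood(mc_io, 0, token, &flood_cfg);
	if (err)
		return err;

	/* turn off HW learning on interface 0, e.g. when it leaves a bridge */
	return dpsw_if_set_learning_mode(mc_io, 0, token, 0,
					 DPSW_LEARNING_MODE_DIS);
}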
+ */ +int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id, + const struct dpsw_acl_cfg *cfg) +{ + struct dpsw_cmd_acl_add *cmd_params; + struct dpsw_rsp_acl_add *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, cmd_flags, token); + cmd_params = (struct dpsw_cmd_acl_add *)cmd.params; + cmd_params->max_entries = cpu_to_le16(cfg->max_entries); - /* send command to mc*/ err = mc_send_command(mc_io, &cmd); if (err) return err; - /* retrieve response parameters */ - rsp_params = (struct dpsw_rsp_if_get_mac_addr *)cmd.params; - for (i = 0; i < 6; i++) - mac_addr[5 - i] = rsp_params->mac_addr[i]; + rsp_params = (struct dpsw_rsp_acl_add *)cmd.params; + *acl_id = le16_to_cpu(rsp_params->acl_id); return 0; } /** - * dpsw_if_set_primary_mac_addr() + * dpsw_acl_remove() - Remove an ACL table from L2 switch. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object - * @if_id: Interface Identifier - * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * @acl_id: ACL ID * - * Return: Completion status. '0' on Success; Error code otherwise. + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id) +{ + struct dpsw_cmd_acl_remove *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_acl_add_if() - Associate interface/interfaces with an ACL table. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: ACL ID + * @cfg: Interfaces list + * + * Return: '0' on Success; Error code otherwise. */ -int dpsw_if_set_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]) +int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_if_cfg *cfg) { - struct dpsw_cmd_if_set_mac_addr *cmd_params; + struct dpsw_cmd_acl_if *cmd_params; struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_acl_remove_if() - De-associate interface/interfaces from an ACL table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: ACL ID + * @cfg: Interfaces list + * + * Return: '0' on Success; Error code otherwise. 
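A short sequence showing how the ACL table calls above fit together — create a table, bind an interface, and unwind on error; the table size and interface number are assumptions:

static int example_acl_table(struct fsl_mc_io *mc_io, u16 token, u16 *acl_id)
{
	struct dpsw_acl_cfg acl_cfg = {
		.max_entries = 16,
	};
	struct dpsw_acl_if_cfg acl_if_cfg = {
		.num_ifs = 1,
		.if_id = { 1 },
	};
	int err;

	err = dpsw_acl_add(mc_io, 0, token, acl_id, &acl_cfg);
	if (err)
		return err;

	err = dpsw_acl_add_if(mc_io, 0, token, *acl_id, &acl_if_cfg);
	if (err)
		dpsw_acl_remove(mc_io, 0, token, *acl_id);

	return err;
}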
+ */ +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_if_cfg *cfg) +{ + struct dpsw_cmd_acl_if *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_acl_prepare_entry_cfg() - Setup an ACL entry + * @key: Key + * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA + * + * This function has to be called before adding or removing acl_entry + * + */ +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, + u8 *entry_cfg_buf) +{ + struct dpsw_prep_acl_entry *ext_params; int i; + ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf; + + for (i = 0; i < 6; i++) { + ext_params->match_l2_dest_mac[i] = key->match.l2_dest_mac[5 - i]; + ext_params->match_l2_source_mac[i] = key->match.l2_source_mac[5 - i]; + ext_params->mask_l2_dest_mac[i] = key->mask.l2_dest_mac[5 - i]; + ext_params->mask_l2_source_mac[i] = key->mask.l2_source_mac[5 - i]; + } + + ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid); + ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id); + ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip); + ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip); + ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port); + ext_params->match_l4_source_port = cpu_to_le16(key->match.l4_source_port); + ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type); + ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei; + ext_params->match_l3_dscp = key->match.l3_dscp; + + ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid); + ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id); + ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip); + ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip); + ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port); + ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port); + ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type); + ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei; + ext_params->mask_l3_dscp = key->mask.l3_dscp; + ext_params->match_l3_protocol = key->match.l3_protocol; + ext_params->mask_l3_protocol = key->mask.l3_protocol; +} + +/** + * dpsw_acl_add_entry() - Add a rule to the ACL table. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: ACL ID + * @cfg: Entry configuration + * + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg() + * + * Return: '0' on Success; Error code otherwise. 
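The prepare-then-add flow described above (fill a zeroed 256-byte buffer with the key, map it for DMA, then pass its IOVA in key_iova) could look roughly as follows; dev, the matched EtherType and the helper name are assumptions, and a real driver would also need linux/slab.h and linux/dma-mapping.h:

static int example_acl_trap_ptp(struct device *dev, struct fsl_mc_io *mc_io,
				u16 token, u16 acl_id)
{
	struct dpsw_acl_entry_cfg entry_cfg = {
		.precedence = 0,
		.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF,
	};
	struct dpsw_acl_key key = { 0 };
	u8 *cmd_buf;
	int err;

	/* e.g. trap all frames with the PTP EtherType to the control interface */
	key.match.l2_ether_type = 0x88f7;
	key.mask.l2_ether_type = 0xffff;

	cmd_buf = kzalloc(256, GFP_KERNEL);	/* zeroed 256 bytes, per the API doc */
	if (!cmd_buf)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(&key, cmd_buf);

	entry_cfg.key_iova = dma_map_single(dev, cmd_buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, entry_cfg.key_iova)) {
		kfree(cmd_buf);
		return -ENOMEM;
	}

	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry_cfg);

	dma_unmap_single(dev, entry_cfg.key_iova, 256, DMA_TO_DEVICE);
	kfree(cmd_buf);

	return err;
}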
+ */ +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg) +{ + struct dpsw_cmd_acl_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id); + cmd_params->precedence = cpu_to_le32(cfg->precedence); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + dpsw_set_field(cmd_params->result_action, + RESULT_ACTION, + cfg->result.action); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_acl_remove_entry() - Removes an entry from ACL. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: ACL ID + * @cfg: Entry configuration + * + * warning: This function has to be called after dpsw_acl_set_entry_cfg() + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg) +{ + struct dpsw_cmd_acl_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR, + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY, cmd_flags, token); - cmd_params = (struct dpsw_cmd_if_set_mac_addr *)cmd.params; - cmd_params->if_id = cpu_to_le16(if_id); - for (i = 0; i < 6; i++) - cmd_params->mac_addr[i] = mac_addr[5 - i]; + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id); + cmd_params->precedence = cpu_to_le32(cfg->precedence); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + dpsw_set_field(cmd_params->result_action, + RESULT_ACTION, + cfg->result.action); /* send command to mc*/ return mc_send_command(mc_io, &cmd); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h new file mode 100644 index 000000000000..5ef221a25b02 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h @@ -0,0 +1,755 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2014-2016 Freescale Semiconductor Inc. 
+ * Copyright 2017-2021 NXP + * + */ + +#ifndef __FSL_DPSW_H +#define __FSL_DPSW_H + +/* Data Path L2-Switch API + * Contains API for handling DPSW topology and functionality + */ + +struct fsl_mc_io; + +/* DPSW general definitions */ + +#define DPSW_MAX_PRIORITIES 8 + +#define DPSW_MAX_IF 64 + +int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token); + +int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/* DPSW options */ + +/** + * DPSW_OPT_FLOODING_DIS - Flooding was disabled at device create + */ +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL +/** + * DPSW_OPT_MULTICAST_DIS - Multicast was disabled at device create + */ +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL +/** + * DPSW_OPT_CTRL_IF_DIS - Control interface support is disabled + */ +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL + +/** + * enum dpsw_component_type - component type of a bridge + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an + * enterprise VLAN bridge or of a Provider Bridge used + * to process C-tagged frames + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a + * Provider Bridge + * + */ +enum dpsw_component_type { + DPSW_COMPONENT_TYPE_C_VLAN = 0, + DPSW_COMPONENT_TYPE_S_VLAN +}; + +/** + * enum dpsw_flooding_cfg - flooding configuration requested + * @DPSW_FLOODING_PER_VLAN: Flooding replicators are allocated per VLAN and + * interfaces present in each of them can be configured using + * dpsw_vlan_add_if_flooding()/dpsw_vlan_remove_if_flooding(). + * This is the default configuration. + * + * @DPSW_FLOODING_PER_FDB: Flooding replicators are allocated per FDB and + * interfaces present in each of them can be configured using + * dpsw_set_egress_flood(). + */ +enum dpsw_flooding_cfg { + DPSW_FLOODING_PER_VLAN = 0, + DPSW_FLOODING_PER_FDB, +}; + +/** + * enum dpsw_broadcast_cfg - broadcast configuration requested + * @DPSW_BROADCAST_PER_OBJECT: There is only one broadcast replicator per DPSW + * object. This is the default configuration. + * @DPSW_BROADCAST_PER_FDB: Broadcast replicators are allocated per FDB and + * interfaces present in each of them can be configured using + * dpsw_set_egress_flood(). 
+ */ +enum dpsw_broadcast_cfg { + DPSW_BROADCAST_PER_OBJECT = 0, + DPSW_BROADCAST_PER_FDB, +}; + +int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/* DPSW IRQ Index and Events */ + +#define DPSW_IRQ_INDEX_IF 0x0000 +#define DPSW_IRQ_INDEX_L2SW 0x0001 + +/** + * DPSW_IRQ_EVENT_LINK_CHANGED - Indicates that the link state changed + */ +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 + +/** + * struct dpsw_irq_cfg - IRQ configuration + * @addr: Address that must be written to signal a message-based interrupt + * @val: Value to write into irq_addr address + * @irq_num: A user defined number associated with this IRQ + */ +struct dpsw_irq_cfg { + u64 addr; + u32 val; + int irq_num; +}; + +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u8 en); + +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 mask); + +int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 *status); + +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 status); + +/** + * struct dpsw_attr - Structure representing DPSW attributes + * @id: DPSW object ID + * @options: Enable/Disable DPSW features + * @max_vlans: Maximum Number of VLANs + * @max_meters_per_if: Number of meters per interface + * @max_fdbs: Maximum Number of FDBs + * @max_fdb_entries: Number of FDB entries for default FDB table; + * 0 - indicates default 1024 entries. + * @fdb_aging_time: Default FDB aging time for default FDB table; + * 0 - indicates default 300 seconds + * @max_fdb_mc_groups: Number of multicast groups in each FDB table; + * 0 - indicates default 32 + * @mem_size: DPSW frame storage memory size + * @num_ifs: Number of interfaces + * @num_vlans: Current number of VLANs + * @num_fdbs: Current number of FDBs + * @component_type: Component type of this bridge + * @flooding_cfg: Flooding configuration (PER_VLAN - default, PER_FDB) + * @broadcast_cfg: Broadcast configuration (PER_OBJECT - default, PER_FDB) + */ +struct dpsw_attr { + int id; + u64 options; + u16 max_vlans; + u8 max_meters_per_if; + u8 max_fdbs; + u16 max_fdb_entries; + u16 fdb_aging_time; + u16 max_fdb_mc_groups; + u16 num_ifs; + u16 mem_size; + u16 num_vlans; + u8 num_fdbs; + enum dpsw_component_type component_type; + enum dpsw_flooding_cfg flooding_cfg; + enum dpsw_broadcast_cfg broadcast_cfg; +}; + +int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + struct dpsw_attr *attr); + +/** + * struct dpsw_ctrl_if_attr - Control interface attributes + * @rx_fqid: Receive FQID + * @rx_err_fqid: Receive error FQID + * @tx_err_conf_fqid: Transmit error and confirmation FQID + */ +struct dpsw_ctrl_if_attr { + u32 rx_fqid; + u32 rx_err_fqid; + u32 tx_err_conf_fqid; +}; + +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpsw_ctrl_if_attr *attr); + +enum dpsw_queue_type { + DPSW_QUEUE_RX, + DPSW_QUEUE_TX_ERR_CONF, + DPSW_QUEUE_RX_ERR, +}; + +#define DPSW_MAX_DPBP 8 + +/** + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + * @pools.dpbp_id: DPBP object ID + * @pools.buffer_size: Buffer size + * @pools.backup_pool: Backup pool + */ +struct 
dpsw_ctrl_if_pools_cfg { + u8 num_dpbp; + struct { + int dpbp_id; + u16 buffer_size; + int backup_pool; + } pools[DPSW_MAX_DPBP]; +}; + +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_ctrl_if_pools_cfg *cfg); + +#define DPSW_CTRL_IF_QUEUE_OPT_USER_CTX 0x00000001 +#define DPSW_CTRL_IF_QUEUE_OPT_DEST 0x00000002 + +enum dpsw_ctrl_if_dest { + DPSW_CTRL_IF_DEST_NONE = 0, + DPSW_CTRL_IF_DEST_DPIO = 1, +}; + +struct dpsw_ctrl_if_dest_cfg { + enum dpsw_ctrl_if_dest dest_type; + int dest_id; + u8 priority; +}; + +struct dpsw_ctrl_if_queue_cfg { + u32 options; + u64 user_ctx; + struct dpsw_ctrl_if_dest_cfg dest_cfg; +}; + +int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + enum dpsw_queue_type qtype, + const struct dpsw_ctrl_if_queue_cfg *cfg); + +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/** + * enum dpsw_action - Action selection for special/control frames + * @DPSW_ACTION_DROP: Drop frame + * @DPSW_ACTION_REDIRECT: Redirect frame to control port + */ +enum dpsw_action { + DPSW_ACTION_DROP = 0, + DPSW_ACTION_REDIRECT = 1 +}; + +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpsw_link_cfg - Structure representing DPSW link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values + */ +struct dpsw_link_cfg { + u32 rate; + u64 options; +}; + +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + struct dpsw_link_cfg *cfg); + +/** + * struct dpsw_link_state - Structure representing DPSW link state + * @rate: Rate + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values + * @up: 0 - covers two cases: down and disconnected, 1 - up + */ +struct dpsw_link_state { + u32 rate; + u64 options; + u8 up; +}; + +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_link_state *state); + +/** + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration + * @pcp: Priority Code Point (PCP): a 3-bit field which refers + * to the IEEE 802.1p priority + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used + * separately or in conjunction with PCP to indicate frames + * eligible to be dropped in the presence of congestion + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN + * to which the frame belongs. 
The hexadecimal values + * of 0x000 and 0xFFF are reserved; + * all other values may be used as VLAN identifiers, + * allowing up to 4,094 VLANs + */ +struct dpsw_tci_cfg { + u8 pcp; + u8 dei; + u16 vlan_id; +}; + +int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + const struct dpsw_tci_cfg *cfg); + +int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + struct dpsw_tci_cfg *cfg); + +/** + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states + * @DPSW_STP_STATE_DISABLED: Disabled state + * @DPSW_STP_STATE_LISTENING: Listening state + * @DPSW_STP_STATE_LEARNING: Learning state + * @DPSW_STP_STATE_FORWARDING: Forwarding state + * @DPSW_STP_STATE_BLOCKING: Blocking state + * + */ +enum dpsw_stp_state { + DPSW_STP_STATE_DISABLED = 0, + DPSW_STP_STATE_LISTENING = 1, + DPSW_STP_STATE_LEARNING = 2, + DPSW_STP_STATE_FORWARDING = 3, + DPSW_STP_STATE_BLOCKING = 0 +}; + +/** + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration + * @vlan_id: VLAN ID STP state + * @state: STP state + */ +struct dpsw_stp_cfg { + u16 vlan_id; + enum dpsw_stp_state state; +}; + +int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + const struct dpsw_stp_cfg *cfg); + +/** + * enum dpsw_accepted_frames - Types of frames to accept + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and + * priority tagged frames + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or + * Priority-Tagged frames received on this interface. + * + */ +enum dpsw_accepted_frames { + DPSW_ADMIT_ALL = 1, + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 +}; + +/** + * enum dpsw_counter - Counters types + * @DPSW_CNT_ING_FRAME: Counts ingress frames + * @DPSW_CNT_ING_BYTE: Counts ingress bytes + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes + * @DPSW_CNT_EGR_FRAME: Counts egress frames + * @DPSW_CNT_EGR_BYTE: Counts egress bytes + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames + * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames + */ +enum dpsw_counter { + DPSW_CNT_ING_FRAME = 0x0, + DPSW_CNT_ING_BYTE = 0x1, + DPSW_CNT_ING_FLTR_FRAME = 0x2, + DPSW_CNT_ING_FRAME_DISCARD = 0x3, + DPSW_CNT_ING_MCAST_FRAME = 0x4, + DPSW_CNT_ING_MCAST_BYTE = 0x5, + DPSW_CNT_ING_BCAST_FRAME = 0x6, + DPSW_CNT_ING_BCAST_BYTES = 0x7, + DPSW_CNT_EGR_FRAME = 0x8, + DPSW_CNT_EGR_BYTE = 0x9, + DPSW_CNT_EGR_FRAME_DISCARD = 0xa, + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb, + DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc, +}; + +int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_counter type, u64 *counter); + +int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id); + +int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id); + +/** + * struct dpsw_if_attr - Structure representing DPSW interface attributes + * @num_tcs: Number of traffic classes + * @rate: Transmit rate in bits per second + * @options: Interface configuration options (bitmap) + * @enabled: Indicates if interface is enabled + * @accept_all_vlan: The device discards/accepts incoming 
frames + * for VLANs that do not include this interface + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device + * discards untagged frames or priority-tagged frames received on + * this interface; + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- + * tagged frames received on this interface are accepted + * @qdid: control frames transmit qdid + */ +struct dpsw_if_attr { + u8 num_tcs; + u32 rate; + u32 options; + int enabled; + int accept_all_vlan; + enum dpsw_accepted_frames admit_untagged; + u16 qdid; +}; + +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_if_attr *attr); + +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, u16 frame_length); + +/** + * struct dpsw_vlan_cfg - VLAN Configuration + * @fdb_id: Forwarding Data Base + */ +struct dpsw_vlan_cfg { + u16 fdb_id; +}; + +int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_cfg *cfg); + +#define DPSW_VLAN_ADD_IF_OPT_FDB_ID 0x0001 + +/** + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces + * @num_ifs: The number of interfaces that are assigned to the egress + * list for this VLAN + * @if_id: The set of interfaces that are + * assigned to the egress list for this VLAN + * @options: Options map for this command (DPSW_VLAN_ADD_IF_OPT_FDB_ID) + * @fdb_id: FDB id to be used by this VLAN on these specific interfaces + * (taken into account only if the DPSW_VLAN_ADD_IF_OPT_FDB_ID is + * specified in the options field) + */ +struct dpsw_vlan_if_cfg { + u16 num_ifs; + u16 options; + u16 if_id[DPSW_MAX_IF]; + u16 fdb_id; +}; + +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id); + +/** + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic + * @DPSW_FDB_ENTRY_STATIC: Static entry + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry + */ +enum dpsw_fdb_entry_type { + DPSW_FDB_ENTRY_STATIC = 0, + DPSW_FDB_ENTRY_DINAMIC = 1 +}; + +/** + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration + * @type: Select static or dynamic entry + * @mac_addr: MAC address + * @if_egress: Egress interface ID + */ +struct dpsw_fdb_unicast_cfg { + enum dpsw_fdb_entry_type type; + u8 mac_addr[6]; + u16 if_egress; +}; + +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg); + +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg); + +#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0) +#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1) + +/** + * struct fdb_dump_entry - fdb snapshot entry + * @mac_addr: MAC address + * @type: bit0 - DINAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0) + * @if_info: unicast - egress interface, multicast - number of egress interfaces + * @if_mask: multicast - egress interface mask + */ +struct fdb_dump_entry { + u8 mac_addr[6]; + u8 type; + u8 
if_info; + u8 if_mask[8]; +}; + +int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id, + u64 iova_addr, u32 iova_size, u16 *num_entries); + +/** + * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration + * @type: Select static or dynamic entry + * @mac_addr: MAC address + * @num_ifs: Number of external and internal interfaces + * @if_id: Egress interface IDs + */ +struct dpsw_fdb_multicast_cfg { + enum dpsw_fdb_entry_type type; + u8 mac_addr[6]; + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg); + +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg); + +/** + * enum dpsw_learning_mode - Auto-learning modes + * @DPSW_LEARNING_MODE_DIS: Disable Auto-learning + * @DPSW_LEARNING_MODE_HW: Enable HW auto-Learning + * @DPSW_LEARNING_MODE_NON_SECURE: Enable None secure learning by CPU + * @DPSW_LEARNING_MODE_SECURE: Enable secure learning by CPU + * + * NONE - SECURE LEARNING + * SMAC found DMAC found CTLU Action + * v v Forward frame to + * 1. DMAC destination + * - v Forward frame to + * 1. DMAC destination + * 2. Control interface + * v - Forward frame to + * 1. Flooding list of interfaces + * - - Forward frame to + * 1. Flooding list of interfaces + * 2. Control interface + * SECURE LEARING + * SMAC found DMAC found CTLU Action + * v v Forward frame to + * 1. DMAC destination + * - v Forward frame to + * 1. Control interface + * v - Forward frame to + * 1. Flooding list of interfaces + * - - Forward frame to + * 1. Control interface + */ +enum dpsw_learning_mode { + DPSW_LEARNING_MODE_DIS = 0, + DPSW_LEARNING_MODE_HW = 1, + DPSW_LEARNING_MODE_NON_SECURE = 2, + DPSW_LEARNING_MODE_SECURE = 3 +}; + +/** + * struct dpsw_fdb_attr - FDB Attributes + * @max_fdb_entries: Number of FDB entries + * @fdb_ageing_time: Ageing time in seconds + * @learning_mode: Learning mode + * @num_fdb_mc_groups: Current number of multicast groups + * @max_fdb_mc_groups: Maximum number of multicast groups + */ +struct dpsw_fdb_attr { + u16 max_fdb_entries; + u16 fdb_ageing_time; + enum dpsw_learning_mode learning_mode; + u16 num_fdb_mc_groups; + u16 max_fdb_mc_groups; +}; + +int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 *major_ver, u16 *minor_ver); + +int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, u8 mac_addr[6]); + +/** + * struct dpsw_fdb_cfg - FDB Configuration + * @num_fdb_entries: Number of FDB entries + * @fdb_ageing_time: Ageing time in seconds + */ +struct dpsw_fdb_cfg { + u16 num_fdb_entries; + u16 fdb_ageing_time; +}; + +int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id, + const struct dpsw_fdb_cfg *cfg); + +int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id); + +/** + * enum dpsw_flood_type - Define the flood type of a DPSW object + * @DPSW_BROADCAST: Broadcast flooding + * @DPSW_FLOODING: Unknown flooding + */ +enum dpsw_flood_type { + DPSW_BROADCAST = 0, + DPSW_FLOODING, +}; + +struct dpsw_egress_flood_cfg { + u16 fdb_id; + enum dpsw_flood_type flood_type; + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_egress_flood_cfg *cfg); + +int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum 
dpsw_learning_mode mode); + +/** + * struct dpsw_acl_cfg - ACL Configuration + * @max_entries: Number of ACL rules + */ +struct dpsw_acl_cfg { + u16 max_entries; +}; + +int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id, + const struct dpsw_acl_cfg *cfg); + +int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id); + +/** + * struct dpsw_acl_if_cfg - List of interfaces to associate with an ACL table + * @num_ifs: Number of interfaces + * @if_id: List of interfaces + */ +struct dpsw_acl_if_cfg { + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_if_cfg *cfg); + +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_if_cfg *cfg); + +/** + * struct dpsw_acl_fields - ACL fields. + * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast, + * slow protocols, MVRP, STP + * @l2_source_mac: Source MAC address + * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following + * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae, + * Q-in-Q, IPv4, IPv6, PPPoE + * @l2_pcp_dei: indicate which protocol is encapsulated in the payload + * @l2_vlan_id: layer 2 VLAN ID + * @l2_ether_type: layer 2 Ethernet type + * @l3_dscp: Layer 3 differentiated services code point + * @l3_protocol: Tells the Network layer at the destination host, to which + * Protocol this packet belongs to. The following protocol are + * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6 + * (encapsulation), GRE, PTP + * @l3_source_ip: Source IPv4 IP + * @l3_dest_ip: Destination IPv4 IP + * @l4_source_port: Source TCP/UDP Port + * @l4_dest_port: Destination TCP/UDP Port + */ +struct dpsw_acl_fields { + u8 l2_dest_mac[6]; + u8 l2_source_mac[6]; + u16 l2_tpid; + u8 l2_pcp_dei; + u16 l2_vlan_id; + u16 l2_ether_type; + u8 l3_dscp; + u8 l3_protocol; + u32 l3_source_ip; + u32 l3_dest_ip; + u16 l4_source_port; + u16 l4_dest_port; +}; + +/** + * struct dpsw_acl_key - ACL key + * @match: Match fields + * @mask: Mask: b'1 - valid, b'0 don't care + */ +struct dpsw_acl_key { + struct dpsw_acl_fields match; + struct dpsw_acl_fields mask; +}; + +/** + * enum dpsw_acl_action - action to be run on the ACL rule match + * @DPSW_ACL_ACTION_DROP: Drop frame + * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port + * @DPSW_ACL_ACTION_ACCEPT: Accept frame + * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface + */ +enum dpsw_acl_action { + DPSW_ACL_ACTION_DROP, + DPSW_ACL_ACTION_REDIRECT, + DPSW_ACL_ACTION_ACCEPT, + DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF +}; + +/** + * struct dpsw_acl_result - ACL action + * @action: Action should be taken when ACL entry hit + * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for + * action + */ +struct dpsw_acl_result { + enum dpsw_acl_action action; + u16 if_id; +}; + +/** + * struct dpsw_acl_entry_cfg - ACL entry + * @key_iova: I/O virtual address of DMA-able memory filled with key after call + * to dpsw_acl_prepare_entry_cfg() + * @result: Required action when entry hit occurs + * @precedence: Precedence inside ACL 0 is lowest; This priority can not change + * during the lifetime of a Policy. It is user responsibility to + * space the priorities according to consequent rule additions. 
+ */ +struct dpsw_acl_entry_cfg { + u64 key_iova; + struct dpsw_acl_result result; + int precedence; +}; + +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, + u8 *entry_cfg_buf); + +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); + +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); +#endif /* __FSL_DPSW_H */ diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index ab92382c399a..cdc0ff89388a 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -2,6 +2,7 @@ config FSL_ENETC tristate "ENETC PF driver" depends on PCI && PCI_MSI + select FSL_ENETC_IERB select FSL_ENETC_MDIO select PHYLINK select PCS_LYNX @@ -25,6 +26,14 @@ config FSL_ENETC_VF If compiled as module (M), the module name is fsl-enetc-vf. +config FSL_ENETC_IERB + tristate "ENETC IERB driver" + help + This driver configures the Integrated Endpoint Register Block on NXP + LS1028A. + + If compiled as module (M), the module name is fsl-enetc-ierb. + config FSL_ENETC_MDIO tristate "ENETC MDIO driver" depends on PCI && MDIO_DEVRES && MDIO_BUS diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index 74f7ac253b8b..a139f2e9d59f 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -11,6 +11,9 @@ obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o fsl-enetc-vf-y := enetc_vf.o $(common-objs) fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o +obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o +fsl-enetc-ierb-y := enetc_ierb.o + obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 09471329f3a3..3ca93adb9662 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2,88 +2,138 @@ /* Copyright 2017-2019 NXP */ #include "enetc.h" +#include <linux/bpf_trace.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/vmalloc.h> +#include <linux/ptp_classify.h> +#include <net/pkt_sched.h> -/* ENETC overhead: optional extension BD + 1 BD gap */ -#define ENETC_TXBDS_NEEDED(val) ((val) + 2) -/* max # of chained Tx BDs is 15, including head and extension BD */ -#define ENETC_MAX_SKB_FRAGS 13 -#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) - -static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, - int active_offloads); - -netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) { - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_bdr *tx_ring; - int count; + int num_tx_rings = priv->num_tx_rings; + int i; - tx_ring = priv->tx_ring[skb->queue_mapping]; + for (i = 0; i < priv->num_rx_rings; i++) + if (priv->rx_ring[i]->xdp.prog) + return num_tx_rings - num_possible_cpus(); - if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) - if (unlikely(skb_linearize(skb))) - goto drop_packet_err; + return num_tx_rings; +} - count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { - netif_stop_subqueue(ndev, tx_ring->index); - return 
NETDEV_TX_BUSY; - } +static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv, + struct enetc_bdr *tx_ring) +{ + int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; - enetc_lock_mdio(); - count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads); - enetc_unlock_mdio(); + return priv->rx_ring[index]; +} - if (unlikely(!count)) - goto drop_packet_err; +static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) + return NULL; - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) - netif_stop_subqueue(ndev, tx_ring->index); + return tx_swbd->skb; +} - return NETDEV_TX_OK; +static struct xdp_frame * +enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->is_xdp_redirect) + return tx_swbd->xdp_frame; -drop_packet_err: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + return NULL; } static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, struct enetc_tx_swbd *tx_swbd) { + /* For XDP_TX, pages come from RX, whereas for the other contexts where + * we have is_dma_page_set, those come from skb_frag_dma_map. We need + * to match the DMA mapping length, so we need to differentiate those. + */ if (tx_swbd->is_dma_page) dma_unmap_page(tx_ring->dev, tx_swbd->dma, - tx_swbd->len, DMA_TO_DEVICE); + tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len, + tx_swbd->dir); else dma_unmap_single(tx_ring->dev, tx_swbd->dma, - tx_swbd->len, DMA_TO_DEVICE); + tx_swbd->len, tx_swbd->dir); tx_swbd->dma = 0; } -static void enetc_free_tx_skb(struct enetc_bdr *tx_ring, - struct enetc_tx_swbd *tx_swbd) +static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) { + struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); + struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); + if (tx_swbd->dma) enetc_unmap_tx_buff(tx_ring, tx_swbd); - if (tx_swbd->skb) { - dev_kfree_skb_any(tx_swbd->skb); + if (xdp_frame) { + xdp_return_frame(tx_swbd->xdp_frame); + tx_swbd->xdp_frame = NULL; + } else if (skb) { + dev_kfree_skb_any(skb); tx_swbd->skb = NULL; } } -static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, - int active_offloads) +/* Let H/W know BD ring has been updated */ +static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) +{ + /* includes wmb() */ + enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); +} + +static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, + u8 *msgtype, u8 *twostep, + u16 *correction_offset, u16 *body_offset) +{ + unsigned int ptp_class; + struct ptp_header *hdr; + unsigned int type; + u8 *base; + + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + return -EINVAL; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + return -EINVAL; + + type = ptp_class & PTP_CLASS_PMASK; + if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6) + *udp = 1; + else + *udp = 0; + + *msgtype = ptp_get_msgtype(hdr, ptp_class); + *twostep = hdr->flag_field[0] & 0x2; + + base = skb_mac_header(skb); + *correction_offset = (u8 *)&hdr->correction - base; + *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; + + return 0; +} + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { + bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); + struct enetc_hw *hw = &priv->si->hw; struct enetc_tx_swbd *tx_swbd; - skb_frag_t *frag; int len = skb_headlen(skb); union enetc_tx_bd 
temp_bd; + u8 msgtype, twostep, udp; union enetc_tx_bd *txbd; - bool do_vlan, do_tstamp; + u16 offset1, offset2; int i, count = 0; + skb_frag_t *frag; unsigned int f; dma_addr_t dma; u8 flags = 0; @@ -104,15 +154,25 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, tx_swbd->dma = dma; tx_swbd->len = len; tx_swbd->is_dma_page = 0; + tx_swbd->dir = DMA_TO_DEVICE; count++; do_vlan = skb_vlan_tag_present(skb); - do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) && - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP); - tx_swbd->do_tstamp = do_tstamp; - tx_swbd->check_wb = tx_swbd->do_tstamp; + if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { + if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1, + &offset2) || + msgtype != PTP_MSGTYPE_SYNC || twostep) + WARN_ONCE(1, "Bad packet for one-step timestamping\n"); + else + do_onestep_tstamp = true; + } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) { + do_twostep_tstamp = true; + } - if (do_vlan || do_tstamp) + tx_swbd->do_twostep_tstamp = do_twostep_tstamp; + tx_swbd->check_wb = tx_swbd->do_twostep_tstamp; + + if (do_vlan || do_onestep_tstamp || do_twostep_tstamp) flags |= ENETC_TXBD_FLAGS_EX; if (tx_ring->tsd_enable) @@ -149,7 +209,40 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; } - if (do_tstamp) { + if (do_onestep_tstamp) { + u32 lo, hi, val; + u64 sec, nsec; + u8 *data; + + lo = enetc_rd_hot(hw, ENETC_SICTR0); + hi = enetc_rd_hot(hw, ENETC_SICTR1); + sec = (u64)hi << 32 | lo; + nsec = do_div(sec, 1000000000); + + /* Configure extension BD */ + temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); + e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP; + + /* Update originTimestamp field of Sync packet + * - 48 bits seconds field + * - 32 bits nanseconds field + */ + data = skb_mac_header(skb); + *(__be16 *)(data + offset2) = + htons((sec >> 32) & 0xffff); + *(__be32 *)(data + offset2 + 2) = + htonl(sec & 0xffffffff); + *(__be32 *)(data + offset2 + 6) = htonl(nsec); + + /* Configure single-step register */ + val = ENETC_PM0_SINGLE_STEP_EN; + val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1); + if (udp) + val |= ENETC_PM0_SINGLE_STEP_CH; + + enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val); + enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val); + } else if (do_twostep_tstamp) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; } @@ -186,6 +279,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, tx_swbd->dma = dma; tx_swbd->len = len; tx_swbd->is_dma_page = 1; + tx_swbd->dir = DMA_TO_DEVICE; count++; } @@ -194,6 +288,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, temp_bd.flags = flags; *txbd = temp_bd; + tx_ring->tx_swbd[i].is_eof = true; tx_ring->tx_swbd[i].skb = skb; enetc_bdr_idx_inc(tx_ring, &i); @@ -201,8 +296,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, skb_tx_timestamp(skb); - /* let H/W know BD ring has been updated */ - enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */ + enetc_update_tx_ring_tail(tx_ring); return count; @@ -211,7 +305,7 @@ dma_err: do { tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_skb(tx_ring, tx_swbd); + enetc_free_tx_frame(tx_ring, tx_swbd); if (i == 0) i = tx_ring->bd_count; i--; @@ -220,6 +314,76 @@ dma_err: return 0; } +static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; 
+ int count; + + /* Queue one-step Sync packet if already locked */ + if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { + if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, + &priv->flags)) { + skb_queue_tail(&priv->tx_skbs, skb); + return NETDEV_TX_OK; + } + } + + tx_ring = priv->tx_ring[skb->queue_mapping]; + + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + enetc_lock_mdio(); + count = enetc_map_tx_buffs(tx_ring, skb); + enetc_unlock_mdio(); + + if (unlikely(!count)) + goto drop_packet_err; + + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + netif_stop_subqueue(ndev, tx_ring->index); + + return NETDEV_TX_OK; + +drop_packet_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 udp, msgtype, twostep; + u16 offset1, offset2; + + /* Mark tx timestamp type on skb->cb[0] if requires */ + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { + skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; + } else { + skb->cb[0] = 0; + } + + /* Fall back to two-step timestamp if not one-step Sync packet */ + if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { + if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, + &offset1, &offset2) || + msgtype != PTP_MSGTYPE_SYNC || twostep != 0) + skb->cb[0] = ENETC_F_TX_TSTAMP; + } + + return enetc_start_xmit(skb, ndev); +} + static irqreturn_t enetc_msix(int irq, void *data) { struct enetc_int_vector *v = data; @@ -241,10 +405,6 @@ static irqreturn_t enetc_msix(int irq, void *data) return IRQ_HANDLED; } -static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); -static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, - struct napi_struct *napi, int work_limit); - static void enetc_rx_dim_work(struct work_struct *w) { struct dim *dim = container_of(w, struct dim, work); @@ -273,55 +433,30 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v) net_dim(&v->rx_dim, dim_sample); } -static int enetc_poll(struct napi_struct *napi, int budget) +static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) { - struct enetc_int_vector - *v = container_of(napi, struct enetc_int_vector, napi); - bool complete = true; - int work_done; - int i; - - enetc_lock_mdio(); - - for (i = 0; i < v->count_tx_rings; i++) - if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) - complete = false; - - work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); - if (work_done == budget) - complete = false; - if (work_done) - v->rx_napi_work = true; - - if (!complete) { - enetc_unlock_mdio(); - return budget; - } - - napi_complete_done(napi, work_done); - - if (likely(v->rx_dim_en)) - enetc_rx_net_dim(v); - - v->rx_napi_work = false; - - /* enable interrupts */ - enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); - - for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) - enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), - ENETC_TBIER_TXTIE); + int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; - enetc_unlock_mdio(); + return pi >= ci ? 
pi - ci : tx_ring->bd_count - ci + pi; +} - return work_done; +static bool enetc_page_reusable(struct page *page) +{ + return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); } -static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) +static void enetc_reuse_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *old) { - int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; + struct enetc_rx_swbd *new; - return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; + new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; + + /* next buf that may reuse a page */ + enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); + + /* copy page reference */ + *new = *old; } static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, @@ -344,23 +479,58 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(tstamp); - /* Ensure skb_mstamp_ns, which might have been populated with - * the txtime, is not mistaken for a software timestamp, - * because this will prevent the dispatch of our hardware - * timestamp to the socket. - */ - skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(skb); skb_tstamp_tx(skb, &shhwtstamps); } } +static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); + struct enetc_rx_swbd rx_swbd = { + .dma = tx_swbd->dma, + .page = tx_swbd->page, + .page_offset = tx_swbd->page_offset, + .dir = tx_swbd->dir, + .len = tx_swbd->len, + }; + struct enetc_bdr *rx_ring; + + rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); + + if (likely(enetc_swbd_unused(rx_ring))) { + enetc_reuse_page(rx_ring, &rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, + rx_swbd.page_offset, + ENETC_RXB_DMA_SIZE_XDP, + rx_swbd.dir); + + rx_ring->stats.recycles++; + } else { + /* RX ring is already full, we need to unmap and free the + * page, since there's nothing useful we can do with it. 
+ */ + rx_ring->stats.recycle_failures++; + + dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, + rx_swbd.dir); + __free_page(rx_swbd.page); + } + + rx_ring->xdp.xdp_tx_in_flight--; +} + static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) { struct net_device *ndev = tx_ring->ndev; + struct enetc_ndev_priv *priv = netdev_priv(ndev); int tx_frm_cnt = 0, tx_byte_cnt = 0; struct enetc_tx_swbd *tx_swbd; int i, bds_to_clean; - bool do_tstamp; + bool do_twostep_tstamp; u64 tstamp = 0; i = tx_ring->next_to_clean; @@ -368,10 +538,12 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) bds_to_clean = enetc_bd_ready_count(tx_ring, i); - do_tstamp = false; + do_twostep_tstamp = false; while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { - bool is_eof = !!tx_swbd->skb; + struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); + struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); + bool is_eof = tx_swbd->is_eof; if (unlikely(tx_swbd->check_wb)) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -380,26 +552,40 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) txbd = ENETC_TXBD(*tx_ring, i); if (txbd->flags & ENETC_TXBD_FLAGS_W && - tx_swbd->do_tstamp) { + tx_swbd->do_twostep_tstamp) { enetc_get_tx_tstamp(&priv->si->hw, txbd, &tstamp); - do_tstamp = true; + do_twostep_tstamp = true; } } - if (likely(tx_swbd->dma)) + if (tx_swbd->is_xdp_tx) + enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); + else if (likely(tx_swbd->dma)) enetc_unmap_tx_buff(tx_ring, tx_swbd); - if (is_eof) { - if (unlikely(do_tstamp)) { - enetc_tstamp_tx(tx_swbd->skb, tstamp); - do_tstamp = false; + if (xdp_frame) { + xdp_return_frame(xdp_frame); + } else if (skb) { + if (unlikely(tx_swbd->skb->cb[0] & + ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { + /* Start work to release lock for next one-step + * timestamping packet. And send one skb in + * tx_skbs queue if has. + */ + schedule_work(&priv->tx_onestep_tstamp); + } else if (unlikely(do_twostep_tstamp)) { + enetc_tstamp_tx(skb, tstamp); + do_twostep_tstamp = false; } - napi_consume_skb(tx_swbd->skb, napi_budget); - tx_swbd->skb = NULL; + napi_consume_skb(skb, napi_budget); } tx_byte_cnt += tx_swbd->len; + /* Scrub the swbd here so we don't have to do that + * when we reuse it during xmit + */ + memset(tx_swbd, 0, sizeof(*tx_swbd)); bds_to_clean--; tx_swbd++; @@ -437,6 +623,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) static bool enetc_new_page(struct enetc_bdr *rx_ring, struct enetc_rx_swbd *rx_swbd) { + bool xdp = !!(rx_ring->xdp.prog); struct page *page; dma_addr_t addr; @@ -444,7 +631,10 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring, if (unlikely(!page)) return false; - addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + /* For XDP_TX, we forgo dma_unmap -> dma_map */ + rx_swbd->dir = xdp ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + + addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { __free_page(page); @@ -453,7 +643,7 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring, rx_swbd->dma = addr; rx_swbd->page = page; - rx_swbd->page_offset = ENETC_RXB_PAD; + rx_swbd->page_offset = rx_ring->buffer_offset; return true; } @@ -483,18 +673,16 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) /* clear 'R" as well */ rxbd->r.lstatus = 0; - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - rx_swbd++; - i++; - if (unlikely(i == rx_ring->bd_count)) { - i = 0; - rx_swbd = rx_ring->rx_swbd; - } + enetc_rxbd_next(rx_ring, &rxbd, &i); + rx_swbd = &rx_ring->rx_swbd[i]; } if (likely(j)) { rx_ring->next_to_alloc = i; /* keep track from page reuse */ rx_ring->next_to_use = i; + + /* update ENETC's consumer index */ + enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); } return j; @@ -570,32 +758,10 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring, #endif } -static void enetc_process_skb(struct enetc_bdr *rx_ring, - struct sk_buff *skb) -{ - skb_record_rx_queue(skb, rx_ring->index); - skb->protocol = eth_type_trans(skb, rx_ring->ndev); -} - -static bool enetc_page_reusable(struct page *page) -{ - return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); -} - -static void enetc_reuse_page(struct enetc_bdr *rx_ring, - struct enetc_rx_swbd *old) -{ - struct enetc_rx_swbd *new; - - new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; - - /* next buf that may reuse a page */ - enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); - - /* copy page reference */ - *new = *old; -} - +/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, + * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL + * mapped buffers. 
+ */ static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, int i, u16 size) { @@ -603,30 +769,39 @@ static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, rx_swbd->page_offset, - size, DMA_FROM_DEVICE); + size, rx_swbd->dir); return rx_swbd; } +/* Reuse the current page without performing half-page buffer flipping */ static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, struct enetc_rx_swbd *rx_swbd) { + size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; + + enetc_reuse_page(rx_ring, rx_swbd); + + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + buffer_size, rx_swbd->dir); + + rx_swbd->page = NULL; +} + +/* Reuse the current page by performing half-page buffer flipping */ +static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ if (likely(enetc_page_reusable(rx_swbd->page))) { rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; page_ref_inc(rx_swbd->page); - enetc_reuse_page(rx_ring, rx_swbd); - - /* sync for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, - rx_swbd->page_offset, - ENETC_RXB_DMA_SIZE, - DMA_FROM_DEVICE); + enetc_put_rx_buff(rx_ring, rx_swbd); } else { - dma_unmap_page(rx_ring->dev, rx_swbd->dma, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, + rx_swbd->dir); + rx_swbd->page = NULL; } - - rx_swbd->page = NULL; } static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, @@ -637,16 +812,16 @@ static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, void *ba; ba = page_address(rx_swbd->page) + rx_swbd->page_offset; - skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE); + skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); if (unlikely(!skb)) { rx_ring->stats.rx_alloc_errs++; return NULL; } - skb_reserve(skb, ENETC_RXB_PAD); + skb_reserve(skb, rx_ring->buffer_offset); __skb_put(skb, size); - enetc_put_rx_buff(rx_ring, rx_swbd); + enetc_flip_rx_buff(rx_ring, rx_swbd); return skb; } @@ -659,7 +834,72 @@ static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); - enetc_put_rx_buff(rx_ring, rx_swbd); + enetc_flip_rx_buff(rx_ring, rx_swbd); +} + +static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, + u32 bd_status, + union enetc_rx_bd **rxbd, int *i) +{ + if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) + return false; + + enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); + enetc_rxbd_next(rx_ring, rxbd, i); + + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + dma_rmb(); + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + + enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); + enetc_rxbd_next(rx_ring, rxbd, i); + } + + rx_ring->ndev->stats.rx_dropped++; + rx_ring->ndev->stats.rx_errors++; + + return true; +} + +static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, + u32 bd_status, union enetc_rx_bd **rxbd, + int *i, int *cleaned_cnt, int buffer_size) +{ + struct sk_buff *skb; + u16 size; + + size = le16_to_cpu((*rxbd)->r.buf_len); + skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); + if (!skb) + return NULL; + + enetc_get_offloads(rx_ring, *rxbd, skb); + + (*cleaned_cnt)++; + + enetc_rxbd_next(rx_ring, rxbd, i); + + /* not last BD in frame? 
*/ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + size = buffer_size; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu((*rxbd)->r.buf_len); + } + + enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); + + (*cleaned_cnt)++; + + enetc_rxbd_next(rx_ring, rxbd, i); + } + + skb_record_rx_queue(skb, rx_ring->index); + skb->protocol = eth_type_trans(skb, rx_ring->ndev); + + return skb; } #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ @@ -678,15 +918,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, union enetc_rx_bd *rxbd; struct sk_buff *skb; u32 bd_status; - u16 size; - if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { - int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); - - /* update ENETC's consumer index */ - enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); - cleaned_cnt -= count; - } + if (cleaned_cnt >= ENETC_RXBD_BUNDLE) + cleaned_cnt -= enetc_refill_rx_ring(rx_ring, + cleaned_cnt); rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); @@ -695,73 +930,511 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); dma_rmb(); /* for reading other rxbd fields */ - size = le16_to_cpu(rxbd->r.buf_len); - skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); + + if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, + &rxbd, &i)) + break; + + skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt, ENETC_RXB_DMA_SIZE); if (!skb) break; - enetc_get_offloads(rx_ring, rxbd, skb); + rx_byte_cnt += skb->len; + rx_frm_cnt++; - cleaned_cnt++; + napi_gro_receive(napi, skb); + } - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - if (unlikely(++i == rx_ring->bd_count)) - i = 0; + rx_ring->next_to_clean = i; - if (unlikely(bd_status & - ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { - dev_kfree_skb(skb); - while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { - dma_rmb(); - bd_status = le32_to_cpu(rxbd->r.lstatus); + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - if (unlikely(++i == rx_ring->bd_count)) - i = 0; - } + return rx_frm_cnt; +} + +static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, + struct enetc_tx_swbd *tx_swbd, + int frm_len) +{ + union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); + + prefetchw(txbd); + + enetc_clear_tx_bd(txbd); + txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); + txbd->buf_len = cpu_to_le16(tx_swbd->len); + txbd->frm_len = cpu_to_le16(frm_len); + + memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); +} + +/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer + * descriptors. 
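
Before queueing such an array, enetc_xdp_tx() below first checks that the ring has room for it. A minimal standalone sketch of that space check, assuming the ENETC_TXBDS_NEEDED() definition this patch adds to enetc.h (value + 2, for the optional extension BD plus the 1-BD gap) and a hypothetical frame made of a head buffer plus two fragments:

#include <stdbool.h>
#include <stdio.h>

/* from enetc.h in this patch: ENETC overhead is an optional extension BD
 * plus a 1 BD gap
 */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)

static bool ring_has_room(int bd_unused, int num_tx_swbd)
{
	/* mirrors the early-return test in enetc_xdp_tx() */
	return bd_unused >= ENETC_TXBDS_NEEDED(num_tx_swbd);
}

int main(void)
{
	int num_tx_swbd = 3;	/* hypothetical: head buffer + two fragments */

	printf("4 free BDs -> %d\n", ring_has_room(4, num_tx_swbd));	/* 0 */
	printf("5 free BDs -> %d\n", ring_has_room(5, num_tx_swbd));	/* 1 */
	return 0;
}

With three software BDs the frame needs at least five free ring slots; anything less makes enetc_xdp_tx() bail out, and the caller then counts an XDP TX drop.
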
+ */ +static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd) +{ + struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr; + int i, k, frm_len = tmp_tx_swbd->len; + + if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) + return false; + + while (unlikely(!tmp_tx_swbd->is_eof)) { + tmp_tx_swbd++; + frm_len += tmp_tx_swbd->len; + } + + i = tx_ring->next_to_use; + + for (k = 0; k < num_tx_swbd; k++) { + struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k]; + + enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); + + /* last BD needs 'F' bit set */ + if (xdp_tx_swbd->is_eof) { + union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); + + txbd->flags = ENETC_TXBD_FLAGS_F; + } + + enetc_bdr_idx_inc(tx_ring, &i); + } + + tx_ring->next_to_use = i; + + return true; +} + +static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *xdp_tx_arr, + struct xdp_frame *xdp_frame) +{ + struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0]; + struct skb_shared_info *shinfo; + void *data = xdp_frame->data; + int len = xdp_frame->len; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int f; + int n = 0; + + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -1; + } + + xdp_tx_swbd->dma = dma; + xdp_tx_swbd->dir = DMA_TO_DEVICE; + xdp_tx_swbd->len = len; + xdp_tx_swbd->is_xdp_redirect = true; + xdp_tx_swbd->is_eof = false; + xdp_tx_swbd->xdp_frame = NULL; + + n++; + xdp_tx_swbd = &xdp_tx_arr[n]; + + shinfo = xdp_get_shared_info_from_frame(xdp_frame); + + for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; + f++, frag++) { + data = skb_frag_address(frag); + len = skb_frag_size(frag); + + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + /* Undo the DMA mapping for all fragments */ + while (--n >= 0) + enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); + + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -1; + } + + xdp_tx_swbd->dma = dma; + xdp_tx_swbd->dir = DMA_TO_DEVICE; + xdp_tx_swbd->len = len; + xdp_tx_swbd->is_xdp_redirect = true; + xdp_tx_swbd->is_eof = false; + xdp_tx_swbd->xdp_frame = NULL; + + n++; + xdp_tx_swbd = &xdp_tx_arr[n]; + } + + xdp_tx_arr[n - 1].is_eof = true; + xdp_tx_arr[n - 1].xdp_frame = xdp_frame; + + return n; +} + +int enetc_xdp_xmit(struct net_device *ndev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0}; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int xdp_tx_bd_cnt, i, k; + int xdp_tx_frm_cnt = 0; + + enetc_lock_mdio(); + + tx_ring = priv->xdp_tx_ring[smp_processor_id()]; - rx_ring->ndev->stats.rx_dropped++; - rx_ring->ndev->stats.rx_errors++; + prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); + for (k = 0; k < num_frames; k++) { + xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, + xdp_redirect_arr, + frames[k]); + if (unlikely(xdp_tx_bd_cnt < 0)) + break; + + if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, + xdp_tx_bd_cnt))) { + for (i = 0; i < xdp_tx_bd_cnt; i++) + enetc_unmap_tx_buff(tx_ring, + &xdp_redirect_arr[i]); + tx_ring->stats.xdp_tx_drops++; break; } - /* not last BD in frame? 
*/ - while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { - bd_status = le32_to_cpu(rxbd->r.lstatus); - size = ENETC_RXB_DMA_SIZE; + xdp_tx_frm_cnt++; + } - if (bd_status & ENETC_RXBD_LSTATUS_F) { - dma_rmb(); - size = le16_to_cpu(rxbd->r.buf_len); - } + if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt)) + enetc_update_tx_ring_tail(tx_ring); + + tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; + + enetc_unlock_mdio(); + + return xdp_tx_frm_cnt; +} + +static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, + struct xdp_buff *xdp_buff, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; + struct skb_shared_info *shinfo; + + /* To be used for XDP_TX */ + rx_swbd->len = size; - enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); + xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, + rx_ring->buffer_offset, size, false); - cleaned_cnt++; + shinfo = xdp_get_shared_info_from_buff(xdp_buff); + shinfo->nr_frags = 0; +} + +static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, + u16 size, struct xdp_buff *xdp_buff) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags]; + + /* To be used for XDP_TX */ + rx_swbd->len = size; + + skb_frag_off_set(frag, rx_swbd->page_offset); + skb_frag_size_set(frag, size); + __skb_frag_set_page(frag, rx_swbd->page); + + shinfo->nr_frags++; +} + +static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, + union enetc_rx_bd **rxbd, int *i, + int *cleaned_cnt, struct xdp_buff *xdp_buff) +{ + u16 size = le16_to_cpu((*rxbd)->r.buf_len); + + xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); + + enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); + (*cleaned_cnt)++; + enetc_rxbd_next(rx_ring, rxbd, i); - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - if (unlikely(++i == rx_ring->bd_count)) - i = 0; + /* not last BD in frame? */ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + size = ENETC_RXB_DMA_SIZE_XDP; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu((*rxbd)->r.buf_len); } - rx_byte_cnt += skb->len; + enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); + (*cleaned_cnt)++; + enetc_rxbd_next(rx_ring, rxbd, i); + } +} - enetc_process_skb(rx_ring, skb); +/* Convert RX buffer descriptors to TX buffer descriptors. These will be + * recycled back into the RX ring in enetc_clean_tx_ring. 
+ */ +static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, + struct enetc_bdr *rx_ring, + int rx_ring_first, int rx_ring_last) +{ + int n = 0; + + for (; rx_ring_first != rx_ring_last; + n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; + struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n]; + + /* No need to dma_map, we already have DMA_BIDIRECTIONAL */ + tx_swbd->dma = rx_swbd->dma; + tx_swbd->dir = rx_swbd->dir; + tx_swbd->page = rx_swbd->page; + tx_swbd->page_offset = rx_swbd->page_offset; + tx_swbd->len = rx_swbd->len; + tx_swbd->is_dma_page = true; + tx_swbd->is_xdp_tx = true; + tx_swbd->is_eof = false; + } - napi_gro_receive(napi, skb); + /* We rely on caller providing an rx_ring_last > rx_ring_first */ + xdp_tx_arr[n - 1].is_eof = true; + + return n; +} + +static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_put_rx_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } + rx_ring->stats.xdp_drops++; +} + +static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; + + if (rx_swbd->page) { + dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, + rx_swbd->dir); + __free_page(rx_swbd->page); + rx_swbd->page = NULL; + } + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } + rx_ring->stats.xdp_redirect_failures++; +} + +static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit, + struct bpf_prog *prog) +{ + int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; + struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0}; + struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); + int rx_frm_cnt = 0, rx_byte_cnt = 0; + struct enetc_bdr *tx_ring; + int cleaned_cnt, i; + u32 xdp_act; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd, *orig_rxbd; + int orig_i, orig_cleaned_cnt; + struct xdp_buff xdp_buff; + struct sk_buff *skb; + int tmp_orig_i, err; + u32 bd_status; + + rxbd = enetc_rxbd(rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + + if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, + &rxbd, &i)) + break; + + orig_rxbd = rxbd; + orig_cleaned_cnt = cleaned_cnt; + orig_i = i; + + enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt, &xdp_buff); + + xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); + + switch (xdp_act) { + default: + bpf_warn_invalid_xdp_action(xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->ndev, prog, xdp_act); + fallthrough; + case XDP_DROP: + enetc_xdp_drop(rx_ring, orig_i, i); + break; + case XDP_PASS: + rxbd = orig_rxbd; + cleaned_cnt = orig_cleaned_cnt; + i = orig_i; + + skb = enetc_build_skb(rx_ring, bd_status, &rxbd, + &i, &cleaned_cnt, + ENETC_RXB_DMA_SIZE_XDP); + if (unlikely(!skb)) + goto out; + + napi_gro_receive(napi, skb); + break; + case XDP_TX: + tx_ring = priv->xdp_tx_ring[rx_ring->index]; + xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, + rx_ring, + orig_i, i); + + if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, 
xdp_tx_bd_cnt)) { + enetc_xdp_drop(rx_ring, orig_i, i); + tx_ring->stats.xdp_tx_drops++; + } else { + tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; + xdp_tx_frm_cnt++; + /* The XDP_TX enqueue was successful, so we + * need to scrub the RX software BDs because + * the ownership of the buffers no longer + * belongs to the RX ring, and we must prevent + * enetc_refill_rx_ring() from reusing + * rx_swbd->page. + */ + while (orig_i != i) { + rx_ring->rx_swbd[orig_i].page = NULL; + enetc_bdr_idx_inc(rx_ring, &orig_i); + } + } + break; + case XDP_REDIRECT: + /* xdp_return_frame does not support S/G in the sense + * that it leaks the fragments (__xdp_return should not + * call page_frag_free only for the initial buffer). + * Until XDP_REDIRECT gains support for S/G let's keep + * the code structure in place, but dead. We drop the + * S/G frames ourselves to avoid memory leaks which + * would otherwise leave the kernel OOM. + */ + if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) { + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_sg++; + break; + } + + tmp_orig_i = orig_i; + + while (orig_i != i) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[orig_i]); + enetc_bdr_idx_inc(rx_ring, &orig_i); + } + + err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); + if (unlikely(err)) { + enetc_xdp_free(rx_ring, tmp_orig_i, i); + } else { + xdp_redirect_frm_cnt++; + rx_ring->stats.xdp_redirect++; + } + } rx_frm_cnt++; } +out: rx_ring->next_to_clean = i; rx_ring->stats.packets += rx_frm_cnt; rx_ring->stats.bytes += rx_byte_cnt; + if (xdp_redirect_frm_cnt) + xdp_do_flush_map(); + + if (xdp_tx_frm_cnt) + enetc_update_tx_ring_tail(tx_ring); + + if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - + rx_ring->xdp.xdp_tx_in_flight); + return rx_frm_cnt; } +static int enetc_poll(struct napi_struct *napi, int budget) +{ + struct enetc_int_vector + *v = container_of(napi, struct enetc_int_vector, napi); + struct enetc_bdr *rx_ring = &v->rx_ring; + struct bpf_prog *prog; + bool complete = true; + int work_done; + int i; + + enetc_lock_mdio(); + + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; + + prog = rx_ring->xdp.prog; + if (prog) + work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog); + else + work_done = enetc_clean_rx_ring(rx_ring, napi, budget); + if (work_done == budget) + complete = false; + if (work_done) + v->rx_napi_work = true; + + if (!complete) { + enetc_unlock_mdio(); + return budget; + } + + napi_complete_done(napi, work_done); + + if (likely(v->rx_dim_en)) + enetc_rx_net_dim(v); + + v->rx_napi_work = false; + + /* enable interrupts */ + enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); + + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) + enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), + ENETC_TBIER_TXTIE); + + enetc_unlock_mdio(); + + return work_done; +} + /* Probing and Init */ #define ENETC_MAX_RFS_SIZE 64 void enetc_get_si_caps(struct enetc_si *si) @@ -836,7 +1509,7 @@ static void enetc_free_txbdr(struct enetc_bdr *txr) int size, i; for (i = 0; i < txr->bd_count; i++) - enetc_free_tx_skb(txr, &txr->tx_swbd[i]); + enetc_free_tx_frame(txr, &txr->tx_swbd[i]); size = txr->bd_count * sizeof(union enetc_tx_bd); @@ -953,7 +1626,7 @@ static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) for (i = 0; i < tx_ring->bd_count; i++) { struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; - 
enetc_free_tx_skb(tx_ring, tx_swbd); + enetc_free_tx_frame(tx_ring, tx_swbd); } tx_ring->next_to_clean = 0; @@ -973,8 +1646,8 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) if (!rx_swbd->page) continue; - dma_unmap_page(rx_ring->dev, rx_swbd->dma, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, + rx_swbd->dir); __free_page(rx_swbd->page); rx_swbd->page = NULL; } @@ -995,60 +1668,6 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) enetc_free_tx_ring(priv->tx_ring[i]); } -int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) -{ - int size = cbdr->bd_count * sizeof(struct enetc_cbd); - - cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, - GFP_KERNEL); - if (!cbdr->bd_base) - return -ENOMEM; - - /* h/w requires 128B alignment */ - if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { - dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); - return -EINVAL; - } - - cbdr->next_to_clean = 0; - cbdr->next_to_use = 0; - - return 0; -} - -void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) -{ - int size = cbdr->bd_count * sizeof(struct enetc_cbd); - - dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); - cbdr->bd_base = NULL; -} - -void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) -{ - /* set CBDR cache attributes */ - enetc_wr(hw, ENETC_SICAR2, - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); - - enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); - enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); - enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); - - enetc_wr(hw, ENETC_SICBDRPIR, 0); - enetc_wr(hw, ENETC_SICBDRCIR, 0); - - /* enable ring */ - enetc_wr(hw, ENETC_SICBDRMR, BIT(31)); - - cbdr->pir = hw->reg + ENETC_SICBDRPIR; - cbdr->cir = hw->reg + ENETC_SICBDRCIR; -} - -void enetc_clear_cbdr(struct enetc_hw *hw) -{ - enetc_wr(hw, ENETC_SICBDRMR, 0); -} - static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) { int *rss_table; @@ -1108,45 +1727,22 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) priv->bdr_int_num = cpus; priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; priv->tx_ictt = ENETC_TXIC_TIMETHR; - - /* SI specific */ - si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; } int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; - int err; - - err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring); - if (err) - return err; - - enetc_setup_cbdr(&si->hw, &si->cbd_ring); priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), GFP_KERNEL); - if (!priv->cls_rules) { - err = -ENOMEM; - goto err_alloc_cls; - } + if (!priv->cls_rules) + return -ENOMEM; return 0; - -err_alloc_cls: - enetc_clear_cbdr(&si->hw); - enetc_free_cbdr(priv->dev, &si->cbd_ring); - - return err; } void enetc_free_si_resources(struct enetc_ndev_priv *priv) { - struct enetc_si *si = priv->si; - - enetc_clear_cbdr(&si->hw); - enetc_free_cbdr(priv->dev, &si->cbd_ring); - kfree(priv->cls_rules); } @@ -1199,7 +1795,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, ENETC_RTBLENR_LEN(rx_ring->bd_count)); - enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + if (rx_ring->xdp.prog) + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP); + else + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); @@ -1217,9 +1816,9 @@ 
static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); rx_ring->idr = hw->reg + ENETC_SIRXIDR; + enetc_lock_mdio(); enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); - /* update ENETC's consumer index */ - enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use); + enetc_unlock_mdio(); /* enable ring */ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); @@ -1408,6 +2007,29 @@ static int enetc_phylink_connect(struct net_device *ndev) return 0; } +static void enetc_tx_onestep_tstamp(struct work_struct *work) +{ + struct enetc_ndev_priv *priv; + struct sk_buff *skb; + + priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); + + netif_tx_lock(priv->ndev); + + clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); + skb = skb_dequeue(&priv->tx_skbs); + if (skb) + enetc_start_xmit(skb, priv->ndev); + + netif_tx_unlock(priv->ndev); +} + +static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv) +{ + INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); + skb_queue_head_init(&priv->tx_skbs); +} + void enetc_start(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -1434,6 +2056,7 @@ void enetc_start(struct net_device *ndev) int enetc_open(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + int num_stack_tx_queues; int err; err = enetc_setup_irqs(priv); @@ -1452,7 +2075,9 @@ int enetc_open(struct net_device *ndev) if (err) goto err_alloc_rx; - err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); + + err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); if (err) goto err_set_queues; @@ -1460,6 +2085,7 @@ int enetc_open(struct net_device *ndev) if (err) goto err_set_queues; + enetc_tx_onestep_tstamp_init(priv); enetc_setup_bdrs(priv); enetc_start(ndev); @@ -1524,15 +2150,17 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; struct enetc_bdr *tx_ring; + int num_stack_tx_queues; u8 num_tc; int i; + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_tc = mqprio->num_tc; if (!num_tc) { netdev_reset_tc(ndev); - netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); /* Reset all ring priorities to 0 */ for (i = 0; i < priv->num_tx_rings; i++) { @@ -1544,7 +2172,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) } /* Check if we have enough BD rings available to accommodate all TCs */ - if (num_tc > priv->num_tx_rings) { + if (num_tc > num_stack_tx_queues) { netdev_err(ndev, "Max %d traffic classes supported\n", priv->num_tx_rings); return -EINVAL; @@ -1590,6 +2218,54 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, } } +static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + struct bpf_prog *old_prog; + bool is_up; + int i; + + /* The buffer layout is changing, so we need to drain the old + * RX buffers and seed new ones. 
+ */ + is_up = netif_running(dev); + if (is_up) + dev_close(dev); + + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + for (i = 0; i < priv->num_rx_rings; i++) { + struct enetc_bdr *rx_ring = priv->rx_ring[i]; + + rx_ring->xdp.prog = prog; + + if (prog) + rx_ring->buffer_offset = XDP_PACKET_HEADROOM; + else + rx_ring->buffer_offset = ENETC_RXB_PAD; + } + + if (is_up) + return dev_open(dev, extack); + + return 0; +} + +int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } + + return 0; +} + struct net_device_stats *enetc_get_stats(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -1710,11 +2386,16 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) switch (config.tx_type) { case HWTSTAMP_TX_OFF: - priv->active_offloads &= ~ENETC_F_TX_TSTAMP; + priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; break; case HWTSTAMP_TX_ON: + priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; priv->active_offloads |= ENETC_F_TX_TSTAMP; break; + case HWTSTAMP_TX_ONESTEP_SYNC: + priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; + break; default: return -ERANGE; } @@ -1745,7 +2426,9 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) config.flags = 0; - if (priv->active_offloads & ENETC_F_TX_TSTAMP) + if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) + config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; + else if (priv->active_offloads & ENETC_F_TX_TSTAMP) config.tx_type = HWTSTAMP_TX_ON; else config.tx_type = HWTSTAMP_TX_OFF; @@ -1777,8 +2460,9 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) int enetc_alloc_msix(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; - int v_tx_rings; + int first_xdp_tx_ring; int i, n, err, nvec; + int v_tx_rings; nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; /* allocate MSIX for both messaging and Rx/Tx interrupts */ @@ -1806,6 +2490,28 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) priv->int_vector[i] = v; + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + bdr->buffer_offset = ENETC_RXB_PAD; + priv->rx_ring[i] = bdr; + + err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); + if (err) { + kfree(v); + goto fail; + } + + err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&bdr->xdp.rxq); + kfree(v); + goto fail; + } + /* init defaults for adaptive IC */ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { v->rx_ictt = 0x1; @@ -1820,11 +2526,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) int idx; /* default tx ring mapping policy */ - if (priv->bdr_int_num == ENETC_MAX_BDR_INT) - idx = 2 * j + i; /* 2 CPUs */ - else - idx = j + i * v_tx_rings; /* default */ - + idx = priv->bdr_int_num * j + i; __set_bit(idx, &v->tx_rings_map); bdr = &v->tx_ring[j]; bdr->index = idx; @@ -1833,22 +2535,23 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) bdr->bd_count = priv->tx_bd_count; priv->tx_ring[idx] = bdr; } - - bdr = &v->rx_ring; - bdr->index = i; - bdr->ndev = priv->ndev; - bdr->dev = priv->dev; - bdr->bd_count = priv->rx_bd_count; - priv->rx_ring[i] = bdr; } + first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); + priv->xdp_tx_ring = 
&priv->tx_ring[first_xdp_tx_ring]; + return 0; fail: while (i--) { - netif_napi_del(&priv->int_vector[i]->napi); - cancel_work_sync(&priv->int_vector[i]->rx_dim.work); - kfree(priv->int_vector[i]); + struct enetc_int_vector *v = priv->int_vector[i]; + struct enetc_bdr *rx_ring = &v->rx_ring; + + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); + netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); + kfree(v); } pci_free_irq_vectors(pdev); @@ -1862,7 +2565,10 @@ void enetc_free_msix(struct enetc_ndev_priv *priv) for (i = 0; i < priv->bdr_int_num; i++) { struct enetc_int_vector *v = priv->int_vector[i]; + struct enetc_bdr *rx_ring = &v->rx_ring; + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); netif_napi_del(&v->napi); cancel_work_sync(&v->rx_dim.work); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 8b380fc13314..08b283347d9c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -19,12 +19,21 @@ (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN)) struct enetc_tx_swbd { - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct xdp_frame *xdp_frame; + }; dma_addr_t dma; + struct page *page; /* valid only if is_xdp_tx */ + u16 page_offset; /* valid only if is_xdp_tx */ u16 len; + enum dma_data_direction dir; u8 is_dma_page:1; u8 check_wb:1; - u8 do_tstamp:1; + u8 do_twostep_tstamp:1; + u8 is_eof:1; + u8 is_xdp_tx:1; + u8 is_xdp_redirect:1; }; #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE @@ -32,21 +41,45 @@ struct enetc_tx_swbd { #define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ #define ENETC_RXB_DMA_SIZE \ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) +#define ENETC_RXB_DMA_SIZE_XDP \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM) struct enetc_rx_swbd { dma_addr_t dma; struct page *page; u16 page_offset; + enum dma_data_direction dir; + u16 len; }; +/* ENETC overhead: optional extension BD + 1 BD gap */ +#define ENETC_TXBDS_NEEDED(val) ((val) + 2) +/* max # of chained Tx BDs is 15, including head and extension BD */ +#define ENETC_MAX_SKB_FRAGS 13 +#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) + struct enetc_ring_stats { unsigned int packets; unsigned int bytes; unsigned int rx_alloc_errs; + unsigned int xdp_drops; + unsigned int xdp_tx; + unsigned int xdp_tx_drops; + unsigned int xdp_redirect; + unsigned int xdp_redirect_failures; + unsigned int xdp_redirect_sg; + unsigned int recycles; + unsigned int recycle_failures; +}; + +struct enetc_xdp_data { + struct xdp_rxq_info rxq; + struct bpf_prog *prog; + int xdp_tx_in_flight; }; -#define ENETC_RX_RING_DEFAULT_SIZE 512 -#define ENETC_TX_RING_DEFAULT_SIZE 256 +#define ENETC_RX_RING_DEFAULT_SIZE 2048 +#define ENETC_TX_RING_DEFAULT_SIZE 2048 #define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) struct enetc_bdr { @@ -71,6 +104,9 @@ struct enetc_bdr { }; void __iomem *idr; /* Interrupt Detect Register pointer */ + int buffer_offset; + struct enetc_xdp_data xdp; + struct enetc_ring_stats stats; dma_addr_t bd_dma_base; @@ -92,18 +128,28 @@ static inline int enetc_bd_unused(struct enetc_bdr *bdr) return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1; } +static inline int enetc_swbd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_alloc) + return bdr->next_to_clean - bdr->next_to_alloc - 1; + + return bdr->bd_count + 
bdr->next_to_clean - bdr->next_to_alloc - 1; +} + /* Control BD ring */ #define ENETC_CBDR_DEFAULT_SIZE 64 struct enetc_cbdr { void *bd_base; /* points to Rx or Tx BD ring */ void __iomem *pir; void __iomem *cir; + void __iomem *mr; /* mode register */ int bd_count; /* # of BDs */ int next_to_use; int next_to_clean; dma_addr_t bd_dma_base; + struct device *dma_dev; }; #define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i])) @@ -119,19 +165,26 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i) return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]); } -static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring, - union enetc_rx_bd *rxbd, - int i) +static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring, + union enetc_rx_bd **old_rxbd, int *old_index) { - rxbd++; + union enetc_rx_bd *new_rxbd = *old_rxbd; + int new_index = *old_index; + + new_rxbd++; + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK if (rx_ring->ext_en) - rxbd++; + new_rxbd++; #endif - if (unlikely(++i == rx_ring->bd_count)) - rxbd = rx_ring->bd_base; - return rxbd; + if (unlikely(++new_index == rx_ring->bd_count)) { + new_rxbd = rx_ring->bd_base; + new_index = 0; + } + + *old_rxbd = new_rxbd; + *old_index = new_index; } static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd) @@ -184,6 +237,22 @@ static inline bool enetc_si_is_pf(struct enetc_si *si) return !!(si->hw.port); } +static inline int enetc_pf_to_port(struct pci_dev *pf_pdev) +{ + switch (pf_pdev->devfn) { + case 0: + return 0; + case 1: + return 1; + case 2: + return 2; + case 6: + return 3; + default: + return -1; + } +} + #define ENETC_MAX_NUM_TXQS 8 #define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) @@ -218,12 +287,20 @@ struct psfp_cap { u32 max_psfp_meter; }; +#define ENETC_F_TX_TSTAMP_MASK 0xff /* TODO: more hardware offloads */ enum enetc_active_offloads { - ENETC_F_RX_TSTAMP = BIT(0), - ENETC_F_TX_TSTAMP = BIT(1), - ENETC_F_QBV = BIT(2), - ENETC_F_QCI = BIT(3), + /* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */ + ENETC_F_TX_TSTAMP = BIT(0), + ENETC_F_TX_ONESTEP_SYNC_TSTAMP = BIT(1), + + ENETC_F_RX_TSTAMP = BIT(8), + ENETC_F_QBV = BIT(9), + ENETC_F_QCI = BIT(10), +}; + +enum enetc_flags_bit { + ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0, }; /* interrupt coalescing modes */ @@ -252,10 +329,11 @@ struct enetc_ndev_priv { u16 rx_bd_count, tx_bd_count; u16 msg_enable; - int active_offloads; + enum enetc_active_offloads active_offloads; u32 speed; /* store speed for compare update pspeed */ + struct enetc_bdr **xdp_tx_ring; struct enetc_bdr *tx_ring[16]; struct enetc_bdr *rx_ring[16]; @@ -266,6 +344,13 @@ struct enetc_ndev_priv { struct phylink *phylink; int ic_mode; u32 tx_ictt; + + struct bpf_prog *xdp_prog; + + unsigned long flags; + + struct work_struct tx_onestep_tstamp; + struct sk_buff_head tx_skbs; }; /* Messaging */ @@ -305,15 +390,17 @@ int enetc_set_features(struct net_device *ndev, int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data); +int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp); +int enetc_xdp_xmit(struct net_device *ndev, int num_frames, + struct xdp_frame **frames, u32 flags); /* ethtool */ void enetc_set_ethtool_ops(struct net_device *ndev); /* control buffer descriptor ring (CBDR) */ -int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr); -void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr); -void enetc_setup_cbdr(struct 
enetc_hw *hw, struct enetc_cbdr *cbdr); -void enetc_clear_cbdr(struct enetc_hw *hw); +int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, + struct enetc_cbdr *cbdr); +void enetc_teardown_cbdr(struct enetc_cbdr *cbdr); int enetc_set_mac_flt_entry(struct enetc_si *si, int index, char *mac_addr, int si_map); int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c index 201cbc362e33..073e56dcca4e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -3,9 +3,63 @@ #include "enetc.h" -static void enetc_clean_cbdr(struct enetc_si *si) +int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, + struct enetc_cbdr *cbdr) +{ + int size = bd_count * sizeof(struct enetc_cbd); + + cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, + GFP_KERNEL); + if (!cbdr->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { + dma_free_coherent(dev, size, cbdr->bd_base, + cbdr->bd_dma_base); + return -EINVAL; + } + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + cbdr->dma_dev = dev; + cbdr->bd_count = bd_count; + + cbdr->pir = hw->reg + ENETC_SICBDRPIR; + cbdr->cir = hw->reg + ENETC_SICBDRCIR; + cbdr->mr = hw->reg + ENETC_SICBDRMR; + + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); + + enetc_wr_reg(cbdr->pir, cbdr->next_to_clean); + enetc_wr_reg(cbdr->cir, cbdr->next_to_use); + /* enable ring */ + enetc_wr_reg(cbdr->mr, BIT(31)); + + return 0; +} + +void enetc_teardown_cbdr(struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + /* disable ring */ + enetc_wr_reg(cbdr->mr, 0); + + dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base, + cbdr->bd_dma_base); + cbdr->bd_base = NULL; + cbdr->dma_dev = NULL; +} + +static void enetc_clean_cbdr(struct enetc_cbdr *ring) { - struct enetc_cbdr *ring = &si->cbd_ring; struct enetc_cbd *dest_cbd; int i, status; @@ -15,7 +69,7 @@ static void enetc_clean_cbdr(struct enetc_si *si) dest_cbd = ENETC_CBD(*ring, i); status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK; if (status) - dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n", + dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n", status, dest_cbd->cmd); memset(dest_cbd, 0, sizeof(*dest_cbd)); @@ -43,7 +97,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) return -EIO; if (unlikely(!enetc_cbd_unused(ring))) - enetc_clean_cbdr(si); + enetc_clean_cbdr(ring); i = ring->next_to_use; dest_cbd = ENETC_CBD(*ring, i); @@ -69,7 +123,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) /* CBD may writeback data, feedback up level */ *cbd = *dest_cbd; - enetc_clean_cbdr(si); + enetc_clean_cbdr(ring); return 0; } @@ -117,6 +171,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index, int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, int index) { + struct enetc_cbdr *ring = &si->cbd_ring; struct enetc_cbd cbd = {.cmd = 0}; dma_addr_t dma, dma_align; void *tmp, *tmp_align; @@ -129,10 +184,10 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, 
cbd.length = cpu_to_le16(sizeof(*rfse)); cbd.opt[3] = cpu_to_le32(0); /* SI */ - tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN, &dma, GFP_KERNEL); if (!tmp) { - dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n"); + dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n"); return -ENOMEM; } @@ -145,9 +200,9 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, err = enetc_send_cmd(si, &cbd); if (err) - dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err); + dev_err(ring->dma_dev, "FS entry add failed (%d)!", err); - dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN, tmp, dma); return err; @@ -157,6 +212,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, bool read) { + struct enetc_cbdr *ring = &si->cbd_ring; struct enetc_cbd cbd = {.cmd = 0}; dma_addr_t dma, dma_align; u8 *tmp, *tmp_align; @@ -166,10 +222,10 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, /* HW only takes in a full 64 entry table */ return -EINVAL; - tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN, + tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN, &dma, GFP_KERNEL); if (!tmp) { - dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n"); + dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n"); return -ENOMEM; } dma_align = ALIGN(dma, RSSE_ALIGN); @@ -189,13 +245,13 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, err = enetc_send_cmd(si, &cbd); if (err) - dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err); + dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err); if (read) for (i = 0; i < count; i++) table[i] = tmp_align[i]; - dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma); + dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma); return err; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 89e558135432..ebccaf02411c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -192,10 +192,18 @@ static const struct { static const char rx_ring_stats[][ETH_GSTRING_LEN] = { "Rx ring %2d frames", "Rx ring %2d alloc errors", + "Rx ring %2d XDP drops", + "Rx ring %2d recycles", + "Rx ring %2d recycle failures", + "Rx ring %2d redirects", + "Rx ring %2d redirect failures", + "Rx ring %2d redirect S/G", }; static const char tx_ring_stats[][ETH_GSTRING_LEN] = { "Tx ring %2d frames", + "Tx ring %2d XDP frames", + "Tx ring %2d XDP drops", }; static int enetc_get_sset_count(struct net_device *ndev, int sset) @@ -267,12 +275,21 @@ static void enetc_get_ethtool_stats(struct net_device *ndev, for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg); - for (i = 0; i < priv->num_tx_rings; i++) + for (i = 0; i < priv->num_tx_rings; i++) { data[o++] = priv->tx_ring[i]->stats.packets; + data[o++] = priv->tx_ring[i]->stats.xdp_tx; + data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops; + } for (i = 0; i < priv->num_rx_rings; i++) { data[o++] = priv->rx_ring[i]->stats.packets; data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; + data[o++] = priv->rx_ring[i]->stats.xdp_drops; + data[o++] = priv->rx_ring[i]->stats.recycles; + data[o++] = 
priv->rx_ring[i]->stats.recycle_failures; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg; } if (!enetc_si_is_pf(priv->si)) @@ -654,7 +671,8 @@ static int enetc_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); #else @@ -690,6 +708,22 @@ static int enetc_set_wol(struct net_device *dev, return ret; } +static void enetc_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(priv->phylink, pause); +} + +static int enetc_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(priv->phylink, pause); +} + static int enetc_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { @@ -736,6 +770,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { .get_ts_info = enetc_get_ts_info, .get_wol = enetc_get_wol, .set_wol = enetc_set_wol, + .get_pauseparam = enetc_get_pauseparam, + .set_pauseparam = enetc_set_pauseparam, }; static const struct ethtool_ops enetc_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 00938f7960a4..0f5f081a5baf 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -109,6 +109,7 @@ enum enetc_bdr_type {TX, RX}; /* RX BDR reg offsets */ #define ENETC_RBMR 0 #define ENETC_RBMR_BDS BIT(2) +#define ENETC_RBMR_CM BIT(4) #define ENETC_RBMR_VTE BIT(5) #define ENETC_RBMR_EN BIT(31) #define ENETC_RBSR 0x4 @@ -180,6 +181,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ #define ENETC_PSIVLAN_EN BIT(31) #define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12) +#define ENETC_PPAUONTR 0x0410 +#define ENETC_PPAUOFFTR 0x0414 #define ENETC_PTXMBAR 0x0608 #define ENETC_PCAPR0 0x0900 #define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) @@ -227,6 +230,7 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_TX_EN BIT(0) #define ENETC_PM0_RX_EN BIT(1) #define ENETC_PM0_PROMISC BIT(4) +#define ENETC_PM0_PAUSE_IGN BIT(8) #define ENETC_PM0_CMD_XGLP BIT(10) #define ENETC_PM0_CMD_TXP BIT(11) #define ENETC_PM0_CMD_PHY_TX_EN BIT(15) @@ -239,6 +243,17 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM_IMDIO_BASE 0x8030 +#define ENETC_PM0_PAUSE_QUANTA 0x8054 +#define ENETC_PM0_PAUSE_THRESH 0x8064 +#define ENETC_PM1_PAUSE_QUANTA 0x9054 +#define ENETC_PM1_PAUSE_THRESH 0x9064 + +#define ENETC_PM0_SINGLE_STEP 0x80c0 +#define ENETC_PM1_SINGLE_STEP 0x90c0 +#define ENETC_PM0_SINGLE_STEP_CH BIT(7) +#define ENETC_PM0_SINGLE_STEP_EN BIT(31) +#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8) + #define ENETC_PM0_IF_MODE 0x8300 #define ENETC_PM0_IFM_RG BIT(2) #define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) @@ -548,6 +563,7 @@ static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd) /* Extension flags */ #define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0) +#define ENETC_TXBD_E_FLAGS_ONE_STEP_PTP BIT(1) #define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2) union enetc_rx_bd { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c 
b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c new file mode 100644 index 000000000000..8b356c485507 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2021 NXP Semiconductors + * + * The Integrated Endpoint Register Block (IERB) is configured by pre-boot + * software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe + * card. Upon FLR, values from the IERB are transferred to the ENETC PFs, and + * are read-only in the PF memory space. + * + * This driver fixes up the power-on reset values for the ENETC shared FIFO, + * such that the TX and RX allocations are sufficient for jumbo frames, and + * that intelligent FIFO dropping is enabled before the internal data + * structures are corrupted. + * + * Even though not all ports might be used on a given board, we are not + * concerned with partitioning the FIFO, because the default values configure + * no strict reservations, so the entire FIFO can be used by the RX of a single + * port, or the TX of a single port. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include "enetc.h" +#include "enetc_ierb.h" + +/* IERB registers */ +#define ENETC_IERB_TXMBAR(port) (((port) * 0x100) + 0x8080) +#define ENETC_IERB_RXMBER(port) (((port) * 0x100) + 0x8090) +#define ENETC_IERB_RXMBLR(port) (((port) * 0x100) + 0x8094) +#define ENETC_IERB_RXBCR(port) (((port) * 0x100) + 0x80a0) +#define ENETC_IERB_TXBCR(port) (((port) * 0x100) + 0x80a8) +#define ENETC_IERB_FMBDTR 0xa000 + +#define ENETC_RESERVED_FOR_ICM 1024 + +struct enetc_ierb { + void __iomem *regs; +}; + +static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val) +{ + iowrite32(val, ierb->regs + offset); +} + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + struct enetc_ierb *ierb = platform_get_drvdata(pdev); + int port = enetc_pf_to_port(pf_pdev); + u16 tx_credit, rx_credit, tx_alloc; + + if (port < 0) + return -ENODEV; + + if (!ierb) + return -EPROBE_DEFER; + + /* By default, it is recommended to set the Host Transfer Agent + * per port transmit byte credit to "1000 + max_frame_size/2". + * The power-on reset value (1800 bytes) is rounded up to the nearest + * 100 assuming a maximum frame size of 1536 bytes. + */ + tx_credit = roundup(1000 + ENETC_MAC_MAXFRM_SIZE / 2, 100); + + /* Internal memory allocated for transmit buffering is guaranteed but + * not reserved; i.e. if the total transmit allocation is not used, + * then the unused portion is not left idle, it can be used for receive + * buffering but it will be reclaimed, if required, from receive by + * intelligently dropping already stored receive frames in the internal + * memory to ensure that the transmit allocation is respected. + * + * PaTXMBAR must be set to a value larger than + * PaTXBCR + 2 * max_frame_size + 32 + * if frame preemption is not enabled, or to + * 2 * PaTXBCR + 2 * p_max_frame_size (pMAC maximum frame size) + + * 2 * np_max_frame_size (eMAC maximum frame size) + 64 + * if frame preemption is enabled. + */ + tx_alloc = roundup(2 * tx_credit + 4 * ENETC_MAC_MAXFRM_SIZE + 64, 16); + + /* Initial credits, in units of 8 bytes, to the Ingress Congestion + * Manager for the maximum amount of bytes the port is allocated for + * pending traffic. 
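
As a quick sanity check on the credit formulas in this comment, a small standalone sketch (using the 1536-byte example frame size quoted above, whereas the driver itself plugs in ENETC_MAC_MAXFRM_SIZE; ROUNDUP() and DIV_ROUND_UP() below are simplified stand-ins for the kernel macros) reproduces the arithmetic:

#include <stdio.h>

#define ROUNDUP(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_frame = 1536;	/* example value from the comment */
	unsigned int tx_credit = ROUNDUP(1000 + max_frame / 2, 100);
	unsigned int tx_alloc = ROUNDUP(2 * tx_credit + 4 * max_frame + 64, 16);
	unsigned int rx_credit = DIV_ROUND_UP(2 * max_frame, 8);

	/* prints: tx_credit=1800 tx_alloc=9808 rx_credit=384 */
	printf("tx_credit=%u tx_alloc=%u rx_credit=%u\n",
	       tx_credit, tx_alloc, rx_credit);
	return 0;
}

The 1800-byte transmit credit matches the power-on reset value mentioned above, and the 384 receive credits, being in units of 8 bytes, cover two maximum-sized frames, as recommended a few lines below.
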
+ * It is recommended to set the initial credits to 2 times the maximum + * frame size (2 frames of maximum size). + */ + rx_credit = DIV_ROUND_UP(ENETC_MAC_MAXFRM_SIZE * 2, 8); + + enetc_ierb_write(ierb, ENETC_IERB_TXBCR(port), tx_credit); + enetc_ierb_write(ierb, ENETC_IERB_TXMBAR(port), tx_alloc); + enetc_ierb_write(ierb, ENETC_IERB_RXBCR(port), rx_credit); + + return 0; +} +EXPORT_SYMBOL(enetc_ierb_register_pf); + +static int enetc_ierb_probe(struct platform_device *pdev) +{ + struct enetc_ierb *ierb; + struct resource *res; + void __iomem *regs; + + ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL); + if (!ierb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + ierb->regs = regs; + + /* Free buffer depletion threshold in bytes. + * This sets the minimum amount of free buffer memory that should be + * maintained in the datapath sub system, and when the amount of free + * buffer memory falls below this threshold, a depletion indication is + * asserted, which may trigger "intelligent drop" frame releases from + * the ingress queues in the ICM. + * It is recommended to set the free buffer depletion threshold to 1024 + * bytes, since the ICM needs some FIFO memory for its own use. + */ + enetc_ierb_write(ierb, ENETC_IERB_FMBDTR, ENETC_RESERVED_FOR_ICM); + + platform_set_drvdata(pdev, ierb); + + return 0; +} + +static int enetc_ierb_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id enetc_ierb_match[] = { + { .compatible = "fsl,ls1028a-enetc-ierb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, enetc_ierb_match); + +static struct platform_driver enetc_ierb_driver = { + .driver = { + .name = "fsl-enetc-ierb", + .of_match_table = enetc_ierb_match, + }, + .probe = enetc_ierb_probe, + .remove = enetc_ierb_remove, +}; + +module_platform_driver(enetc_ierb_driver); + +MODULE_DESCRIPTION("NXP ENETC IERB"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h new file mode 100644 index 000000000000..b3b774e0998a --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2021 NXP Semiconductors */ + +#include <linux/pci.h> +#include <linux/platform_device.h> + +#if IS_ENABLED(CONFIG_FSL_ENETC_IERB) + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev); + +#else + +static inline int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 224fc37a6757..31274325159a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -4,8 +4,10 @@ #include <linux/mdio.h> #include <linux/module.h> #include <linux/fsl/enetc_mdio.h> +#include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include "enetc_ierb.h" #include "enetc_pf.h" #define ENETC_DRV_NAME_STR "ENETC PF driver" @@ -129,16 +131,20 @@ static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) } static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type, - u32 *hash) + unsigned long hash) { bool err = si->errata & ENETC_ERR_UCMCSWP; if (type == UC) { - 
enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash); - enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1)); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), + lower_32_bits(hash)); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), + upper_32_bits(hash)); } else { /* MC */ - enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash); - enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1)); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), + lower_32_bits(hash)); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), + upper_32_bits(hash)); } } @@ -182,7 +188,7 @@ static void enetc_sync_mac_filters(struct enetc_pf *pf) if (i == UC) enetc_clear_mac_flt_entry(si, pos); - enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table); + enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table); } } @@ -248,10 +254,10 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev) } static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, - u32 *hash) + unsigned long hash) { - enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash); - enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1)); + enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash)); } static int enetc_vid_hash_idx(unsigned int vid) @@ -279,7 +285,7 @@ static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) } } - enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter); + enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter); } static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) @@ -386,23 +392,54 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en) return 0; } -static void enetc_port_setup_primary_mac_address(struct enetc_si *si) +static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf, + int si) { - unsigned char mac_addr[MAX_ADDR_LEN]; - struct enetc_pf *pf = enetc_si_priv(si); - struct enetc_hw *hw = &si->hw; - int i; + struct device *dev = &pf->si->pdev->dev; + struct enetc_hw *hw = &pf->si->hw; + u8 mac_addr[ETH_ALEN] = { 0 }; + int err; - /* check MAC addresses for PF and all VFs, if any is 0 set it ro rand */ - for (i = 0; i < pf->total_vfs + 1; i++) { - enetc_pf_get_primary_mac_addr(hw, i, mac_addr); - if (!is_zero_ether_addr(mac_addr)) - continue; + /* (1) try to get the MAC address from the device tree */ + if (np) { + err = of_get_mac_address(np, mac_addr); + if (err == -EPROBE_DEFER) + return err; + } + + /* (2) bootloader supplied MAC address */ + if (is_zero_ether_addr(mac_addr)) + enetc_pf_get_primary_mac_addr(hw, si, mac_addr); + + /* (3) choose a random one */ + if (is_zero_ether_addr(mac_addr)) { eth_random_addr(mac_addr); - dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n", - i, mac_addr); - enetc_pf_set_primary_mac_addr(hw, i, mac_addr); + dev_info(dev, "no MAC address specified for SI%d, using %pM\n", + si, mac_addr); + } + + enetc_pf_set_primary_mac_addr(hw, si, mac_addr); + + return 0; +} + +static int enetc_setup_mac_addresses(struct device_node *np, + struct enetc_pf *pf) +{ + int err, i; + + /* The PF might take its MAC from the device tree */ + err = enetc_setup_mac_address(np, pf, 0); + if (err) + return err; + + for (i = 0; i < pf->total_vfs; i++) { + err = enetc_setup_mac_address(NULL, pf, i + 1); + if (err) + return err; } + + return 0; } static void enetc_port_assign_rfs_entries(struct enetc_si *si) @@ -483,7 +520,6 @@ static void enetc_configure_port_mac(struct enetc_hw 
*hw) ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE); - enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE); enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC); @@ -558,9 +594,6 @@ static void enetc_configure_port(struct enetc_pf *pf) /* split up RFS entries */ enetc_port_assign_rfs_entries(pf->si); - /* fix-up primary MAC addresses, if not set already */ - enetc_port_setup_primary_mac_address(pf->si); - /* enforce VLAN promisc mode for all SIs */ pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL; enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap); @@ -703,6 +736,8 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_features = enetc_pf_set_features, .ndo_do_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_setup_tc, + .ndo_bpf = enetc_setup_bpf, + .ndo_xdp_xmit = enetc_xdp_xmit, }; static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, @@ -979,7 +1014,12 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, int duplex, bool tx_pause, bool rx_pause) { struct enetc_pf *pf = phylink_to_enetc_pf(config); + u32 pause_off_thresh = 0, pause_on_thresh = 0; + u32 init_quanta = 0, refresh_quanta = 0; + struct enetc_hw *hw = &pf->si->hw; struct enetc_ndev_priv *priv; + u32 rbmr, cmd_cfg; + int idx; priv = netdev_priv(pf->si->ndev); if (priv->active_offloads & ENETC_F_QBV) @@ -987,9 +1027,60 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, if (!phylink_autoneg_inband(mode) && phy_interface_mode_is_rgmii(interface)) - enetc_force_rgmii_mac(&pf->si->hw, speed, duplex); + enetc_force_rgmii_mac(hw, speed, duplex); + + /* Flow control */ + for (idx = 0; idx < priv->num_rx_rings; idx++) { + rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + + if (tx_pause) + rbmr |= ENETC_RBMR_CM; + else + rbmr &= ~ENETC_RBMR_CM; - enetc_mac_enable(&pf->si->hw, true); + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); + } + + if (tx_pause) { + /* When the port first enters congestion, send a PAUSE request + * with the maximum number of quanta. When the port exits + * congestion, it will automatically send a PAUSE frame with + * zero quanta. + */ + init_quanta = 0xffff; + + /* Also, set up the refresh timer to send follow-up PAUSE + * frames at half the quanta value, in case the congestion + * condition persists. + */ + refresh_quanta = 0xffff / 2; + + /* Start emitting PAUSE frames when 3 large frames (or more + * smaller frames) have accumulated in the FIFO waiting to be + * DMAed to the RX ring. 
+ */ + pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE; + pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE; + } + + enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta); + enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta); + enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta); + enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta); + enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh); + enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh); + + cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG); + + if (rx_pause) + cmd_cfg &= ~ENETC_PM0_PAUSE_IGN; + else + cmd_cfg |= ENETC_PM0_PAUSE_IGN; + + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg); + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg); + + enetc_mac_enable(hw, true); } static void enetc_pl_mac_link_down(struct phylink_config *config, @@ -1081,24 +1172,28 @@ static int enetc_init_port_rss_memory(struct enetc_si *si) return err; } -static void enetc_init_unused_port(struct enetc_si *si) +static int enetc_pf_register_with_ierb(struct pci_dev *pdev) { - struct device *dev = &si->pdev->dev; - struct enetc_hw *hw = &si->hw; - int err; + struct device_node *node = pdev->dev.of_node; + struct platform_device *ierb_pdev; + struct device_node *ierb_node; - si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; - err = enetc_alloc_cbdr(dev, &si->cbd_ring); - if (err) - return; + /* Don't register with the IERB if the PF itself is disabled */ + if (!node || !of_device_is_available(node)) + return 0; + + ierb_node = of_find_compatible_node(NULL, NULL, + "fsl,ls1028a-enetc-ierb"); + if (!ierb_node || !of_device_is_available(ierb_node)) + return -ENODEV; - enetc_setup_cbdr(hw, &si->cbd_ring); + ierb_pdev = of_find_device_by_node(ierb_node); + of_node_put(ierb_node); - enetc_init_port_rfs_memory(si); - enetc_init_port_rss_memory(si); + if (!ierb_pdev) + return -EPROBE_DEFER; - enetc_clear_cbdr(hw); - enetc_free_cbdr(dev, &si->cbd_ring); + return enetc_ierb_register_pf(ierb_pdev, pdev); } static int enetc_pf_probe(struct pci_dev *pdev, @@ -1111,6 +1206,14 @@ static int enetc_pf_probe(struct pci_dev *pdev, struct enetc_pf *pf; int err; + err = enetc_pf_register_with_ierb(pdev); + if (err == -EPROBE_DEFER) + return err; + if (err) + dev_warn(&pdev->dev, + "Could not register with IERB driver: %pe, please update the device tree\n", + ERR_PTR(err)); + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); if (err) { dev_err(&pdev->dev, "PCI probing failed\n"); @@ -1124,8 +1227,24 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_map_pf_space; } + err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, + &si->cbd_ring); + if (err) + goto err_setup_cbdr; + + err = enetc_init_port_rfs_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); + goto err_init_port_rfs; + } + + err = enetc_init_port_rss_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); + goto err_init_port_rss; + } + if (node && !of_device_is_available(node)) { - enetc_init_unused_port(si); dev_info(&pdev->dev, "device is disabled, skipping\n"); err = -ENODEV; goto err_device_disabled; @@ -1135,6 +1254,10 @@ static int enetc_pf_probe(struct pci_dev *pdev, pf->si = si; pf->total_vfs = pci_sriov_get_totalvfs(pdev); + err = enetc_setup_mac_addresses(node, pf); + if (err) + goto err_setup_mac_addresses; + enetc_configure_port(pf); enetc_get_si_caps(si); @@ -1158,18 +1281,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_si_res; } - err = enetc_init_port_rfs_memory(si); - if (err) { - 
dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); - goto err_init_port_rfs; - } - - err = enetc_init_port_rss_memory(si); - if (err) { - dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); - goto err_init_port_rss; - } - err = enetc_configure_si(priv); if (err) { dev_err(&pdev->dev, "Failed to configure SI\n"); @@ -1205,15 +1316,18 @@ err_phylink_create: err_mdiobus_create: enetc_free_msix(priv); err_config_si: -err_init_port_rss: -err_init_port_rfs: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: +err_init_port_rss: +err_init_port_rfs: err_device_disabled: +err_setup_mac_addresses: + enetc_teardown_cbdr(&si->cbd_ring); +err_setup_cbdr: err_map_pf_space: enetc_pci_remove(pdev); @@ -1239,6 +1353,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_free_msix(priv); enetc_free_si_resources(priv); + enetc_teardown_cbdr(&si->cbd_ring); free_netdev(si->ndev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index a9aee219fb58..af699f2ad095 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -455,11 +455,6 @@ static struct enetc_psfp epsfp = { static LIST_HEAD(enetc_block_cb_list); -static inline int enetc_get_port(struct enetc_ndev_priv *priv) -{ - return priv->si->pdev->devfn & 0x7; -} - /* Stream Identity Entry Set Descriptor */ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, struct enetc_streamid *sid, @@ -504,7 +499,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, si_conf = &cbd.sid_set; /* Only one port supported for one entry, set itself */ - si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); + si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); si_conf->id_type = 1; si_conf->oui[2] = 0x0; si_conf->oui[1] = 0x80; @@ -529,7 +524,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, si_conf->en = 0x80; si_conf->stream_handle = cpu_to_le32(sid->handle); - si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); + si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); si_conf->id_type = sid->filtertype; si_conf->oui[2] = 0x0; si_conf->oui[1] = 0x80; @@ -591,7 +586,8 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, } sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); - sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv)); + sfi_config->input_ports = + cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); /* The priority value which may be matched against the * frame’s priority value to determine a match for this entry. 
@@ -1221,6 +1217,11 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, /* Flow meter and max frame size */ if (entryp) { + if (entryp->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); + err = -EOPNOTSUPP; + goto free_sfi; + } if (entryp->police.burst) { fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); if (!fmi) { @@ -1557,10 +1558,10 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) switch (f->command) { case FLOW_BLOCK_BIND: - set_bit(enetc_get_port(priv), &epsfp.dev_bitmap); + set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap); break; case FLOW_BLOCK_UNBIND: - clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap); + clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap); if (!epsfp.dev_bitmap) clean_psfp_all(); break; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 9b755a84c2d6..03090ba7e226 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -165,6 +165,11 @@ static int enetc_vf_probe(struct pci_dev *pdev, enetc_init_si_rings_params(priv); + err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, + &si->cbd_ring); + if (err) + goto err_setup_cbdr; + err = enetc_alloc_si_resources(priv); if (err) { dev_err(&pdev->dev, "SI resource alloc failed\n"); @@ -197,6 +202,8 @@ err_config_si: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: + enetc_teardown_cbdr(&si->cbd_ring); +err_setup_cbdr: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: @@ -216,6 +223,7 @@ static void enetc_vf_remove(struct pci_dev *pdev) enetc_free_msix(priv); enetc_free_si_resources(priv); + enetc_teardown_cbdr(&si->cbd_ring); free_netdev(si->ndev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3db882322b2b..f2065f9d02e6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -38,6 +38,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <net/ip.h> +#include <net/selftests.h> #include <net/tso.h> #include <linux/tcp.h> #include <linux/udp.h> @@ -1665,6 +1666,7 @@ static void fec_get_mac(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned char *iap, tmpaddr[ETH_ALEN]; + int ret; /* * try to get mac address in following order: @@ -1680,9 +1682,9 @@ static void fec_get_mac(struct net_device *ndev) if (!is_valid_ether_addr(iap)) { struct device_node *np = fep->pdev->dev.of_node; if (np) { - const char *mac = of_get_mac_address(np); - if (!IS_ERR(mac)) - iap = (unsigned char *) mac; + ret = of_get_mac_address(np, tmpaddr); + if (!ret) + iap = tmpaddr; } } @@ -2048,6 +2050,8 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; + phy_dev->mac_managed_pm = 1; + phy_attached_info(phy_dev); return 0; @@ -2479,6 +2483,9 @@ static void fec_enet_get_strings(struct net_device *netdev, memcpy(data + i * ETH_GSTRING_LEN, fec_stats[i].name, ETH_GSTRING_LEN); break; + case ETH_SS_TEST: + net_selftest_get_strings(data); + break; } } @@ -2487,6 +2494,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(fec_stats); + case ETH_SS_TEST: + return net_selftest_get_count(); default: return -EOPNOTSUPP; } @@ -2738,6 +2747,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_wol = fec_enet_set_wol, .get_link_ksettings = 
phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .self_test = net_selftest, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -3864,6 +3874,7 @@ static int __maybe_unused fec_resume(struct device *dev) netif_device_attach(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); + phy_init_hw(ndev->phydev); phy_start(ndev->phydev); } rtnl_unlock(); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b3bad429e03b..02c47658a215 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -813,7 +813,6 @@ static int mpc52xx_fec_probe(struct platform_device *op) const u32 *prop; int prop_size; struct device_node *np = op->dev.of_node; - const char *mac_addr; phys_addr_t rx_fifo; phys_addr_t tx_fifo; @@ -891,10 +890,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) * * First try to read MAC address from DT */ - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(ndev->dev_addr, mac_addr); - } else { + rv = of_get_mac_address(np, ndev->dev_addr); + if (rv) { struct mpc52xx_fec __iomem *fec = priv->fec; /* diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 901749a7a318..46ecb42f2ef8 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -605,7 +605,6 @@ static int mac_probe(struct platform_device *_of_dev) struct platform_device *of_dev; struct resource res; struct mac_priv_s *priv; - const u8 *mac_addr; u32 val; u8 fman_id; phy_interface_t phy_if; @@ -723,11 +722,9 @@ static int mac_probe(struct platform_device *_of_dev) priv->cell_index = (u8)val; /* Get the MAC address */ - mac_addr = of_get_mac_address(mac_node); - if (IS_ERR(mac_addr)) + err = of_get_mac_address(mac_node, mac_dev->addr); + if (err) dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node); - else - ether_addr_copy(mac_dev->addr, mac_addr); /* Get the port handles */ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); @@ -853,7 +850,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err < 0) dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); - if (!IS_ERR(mac_addr)) + if (!is_zero_ether_addr(mac_dev->addr)) dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 78e008b81374..6ee325ad35c5 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -918,7 +918,6 @@ static int fs_enet_probe(struct platform_device *ofdev) const u32 *data; struct clk *clk; int err; - const u8 *mac_addr; const char *phy_connection_type; int privsize, len, ret = -ENODEV; @@ -1006,9 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev) spin_lock_init(&fep->lock); spin_lock_init(&fep->tx_lock); - mac_addr = of_get_mac_address(ofdev->dev.of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); + of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); ret = fep->ops->allocate_bd(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3ec4d9fddd52..f2945abdb041 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ 
b/drivers/net/ethernet/freescale/gianfar.c @@ -175,10 +175,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv) if (priv->rx_filer_enable) { rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; /* Program the RIR0 reg with the required distribution */ - if (priv->poll_mode == GFAR_SQ_POLLING) - gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); - else /* GFAR_MQ_POLLING */ - gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0); + gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); } /* Restore PROMISC mode */ @@ -521,29 +518,9 @@ static int gfar_parse_group(struct device_node *np, grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { - u32 rxq_mask, txq_mask; - int ret; - + /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - - ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask); - if (!ret) { - grp->rx_bit_map = rxq_mask ? - rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); - } - - ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); - if (!ret) { - grp->tx_bit_map = txq_mask ? - txq_mask : (DEFAULT_MAPPING >> priv->num_grps); - } - - if (priv->poll_mode == GFAR_SQ_POLLING) { - /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ - grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - } } else { grp->rx_bit_map = 0xFF; grp->tx_bit_map = 0xFF; @@ -640,7 +617,6 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; - const void *mac_addr; int err = 0, i; phy_interface_t interface; struct net_device *dev = NULL; @@ -650,18 +626,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) u32 stash_len = 0; u32 stash_idx = 0; unsigned int num_tx_qs, num_rx_qs; - unsigned short mode, poll_mode; + unsigned short mode; if (!np) return -ENODEV; - if (of_device_is_compatible(np, "fsl,etsec2")) { + if (of_device_is_compatible(np, "fsl,etsec2")) mode = MQ_MG_MODE; - poll_mode = GFAR_SQ_POLLING; - } else { + else mode = SQ_SG_MODE; - poll_mode = GFAR_SQ_POLLING; - } if (mode == SQ_SG_MODE) { num_tx_qs = 1; @@ -677,22 +650,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -EINVAL; } - if (poll_mode == GFAR_SQ_POLLING) { - num_tx_qs = num_grps; /* one txq per int group */ - num_rx_qs = num_grps; /* one rxq per int group */ - } else { /* GFAR_MQ_POLLING */ - u32 tx_queues, rx_queues; - int ret; - - /* parse the num of HW tx and rx queues */ - ret = of_property_read_u32(np, "fsl,num_tx_queues", - &tx_queues); - num_tx_qs = ret ? 1 : tx_queues; - - ret = of_property_read_u32(np, "fsl,num_rx_queues", - &rx_queues); - num_rx_qs = ret ?
1 : rx_queues; - } + num_tx_qs = num_grps; /* one txq per int group */ + num_rx_qs = num_grps; /* one rxq per int group */ } if (num_tx_qs > MAX_TX_QS) { @@ -718,7 +677,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->ndev = dev; priv->mode = mode; - priv->poll_mode = poll_mode; priv->num_tx_queues = num_tx_qs; netif_set_real_num_rx_queues(dev, num_rx_qs); @@ -782,11 +740,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; - mac_addr = of_get_mac_address(np); - - if (!IS_ERR(mac_addr)) { - ether_addr_copy(dev->dev_addr, mac_addr); - } else { + err = of_get_mac_address(np, dev->dev_addr); + if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); } @@ -2695,106 +2650,6 @@ static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) return 0; } -static int gfar_poll_rx(struct napi_struct *napi, int budget) -{ - struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi_rx); - struct gfar_private *priv = gfargrp->priv; - struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_rx_q *rx_queue = NULL; - int work_done = 0, work_done_per_q = 0; - int i, budget_per_q = 0; - unsigned long rstat_rxf; - int num_act_queues; - - /* Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived - */ - gfar_write(&regs->ievent, IEVENT_RX_MASK); - - rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; - - num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); - if (num_act_queues) - budget_per_q = budget/num_act_queues; - - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { - /* skip queue if not active */ - if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) - continue; - - rx_queue = priv->rx_queue[i]; - work_done_per_q = - gfar_clean_rx_ring(rx_queue, budget_per_q); - work_done += work_done_per_q; - - /* finished processing this queue */ - if (work_done_per_q < budget_per_q) { - /* clear active queue hw indication */ - gfar_write(&regs->rstat, - RSTAT_CLEAR_RXF0 >> i); - num_act_queues--; - - if (!num_act_queues) - break; - } - } - - if (!num_act_queues) { - u32 imask; - napi_complete_done(napi, work_done); - - /* Clear the halt bit in RSTAT */ - gfar_write(&regs->rstat, gfargrp->rstat); - - spin_lock_irq(&gfargrp->grplock); - imask = gfar_read(&regs->imask); - imask |= IMASK_RX_DEFAULT; - gfar_write(&regs->imask, imask); - spin_unlock_irq(&gfargrp->grplock); - } - - return work_done; -} - -static int gfar_poll_tx(struct napi_struct *napi, int budget) -{ - struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi_tx); - struct gfar_private *priv = gfargrp->priv; - struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_tx_q *tx_queue = NULL; - int has_tx_work = 0; - int i; - - /* Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived - */ - gfar_write(&regs->ievent, IEVENT_TX_MASK); - - for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { - tx_queue = priv->tx_queue[i]; - /* run Tx cleanup to completion */ - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { - gfar_clean_tx_ring(tx_queue); - has_tx_work = 1; - } - } - - if (!has_tx_work) { - u32 imask; - napi_complete(napi); - - spin_lock_irq(&gfargrp->grplock); - imask = gfar_read(&regs->imask); - imask |= IMASK_TX_DEFAULT; - gfar_write(&regs->imask, imask); - spin_unlock_irq(&gfargrp->grplock); - } - - return
0; -} - /* GFAR error interrupt handler */ static irqreturn_t gfar_error(int irq, void *grp_id) { @@ -3352,17 +3207,10 @@ static int gfar_probe(struct platform_device *ofdev) /* Register for napi ...We are registering NAPI for each grp */ for (i = 0; i < priv->num_grps; i++) { - if (priv->poll_mode == GFAR_SQ_POLLING) { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx_sq, GFAR_DEV_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx_sq, 2); - } else { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx, GFAR_DEV_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx, 2); - } + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 8ced783f5302..5ea47df93e5e 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -909,22 +909,6 @@ enum { MQ_MG_MODE }; -/* GFAR_SQ_POLLING: Single Queue NAPI polling mode - * The driver supports a single pair of RX/Tx queues - * per interrupt group (Rx/Tx int line). MQ_MG mode - * devices have 2 interrupt groups, so the device will - * have a total of 2 Tx and 2 Rx queues in this case. - * GFAR_MQ_POLLING: Multi Queue NAPI polling mode - * The driver supports all the 8 Rx and Tx HW queues - * each queue mapped by the Device Tree to one of - * the 2 interrupt groups. This mode implies significant - * processing overhead (CPU and controller level). - */ -enum gfar_poll_mode { - GFAR_SQ_POLLING = 0, - GFAR_MQ_POLLING -}; - /* * Per TX queue stats */ @@ -1105,7 +1089,6 @@ struct gfar_private { unsigned long state; unsigned short mode; - unsigned short poll_mode; unsigned int num_tx_queues; unsigned int num_rx_queues; unsigned int num_grps; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index ef4e2febeb5b..e0936510fa34 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3562,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev) struct resource res; int err, ucc_num, max_speed = 0; const unsigned int *prop; - const void *mac_addr; phy_interface_t phy_interface; static const int enet_to_speed[] = { SPEED_10, SPEED_10, SPEED_10, @@ -3733,9 +3732,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) goto err_free_netdev; } - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(dev->dev_addr, mac_addr); + of_get_mac_address(np, dev->dev_addr); ugeth->ug_info = ug_info; ugeth->dev = device; diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 0901fa6853ca..5fb05cf36b49 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -14,9 +14,9 @@ static void gve_get_drvinfo(struct net_device *netdev, { struct gve_priv *priv = netdev_priv(netdev); - strlcpy(info->driver, "gve", sizeof(info->driver)); - strlcpy(info->version, gve_version_str, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info)); + strscpy(info->driver, "gve", sizeof(info->driver)); + strscpy(info->version, gve_version_str, sizeof(info->version)); + strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info)); } static 
void gve_set_msglevel(struct net_device *netdev, u32 value) @@ -388,7 +388,7 @@ static int gve_set_channels(struct net_device *netdev, gve_get_channels(netdev, &old_settings); - /* Changing combined is not allowed allowed */ + /* Changing combined is not allowed */ if (cmd->combined_count != old_settings.combined_count) return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 57c3bc4f7089..3c4db4a6b431 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -772,7 +772,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) struct net_device *ndev; struct hisi_femac_priv *priv; struct phy_device *phy; - const char *mac_addr; int ret; ndev = alloc_etherdev(sizeof(*priv)); @@ -842,10 +841,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) (unsigned long)phy->phy_id, phy_modes(phy->interface)); - mac_addr = of_get_mac_address(node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(node, ndev->dev_addr); + if (ret) { eth_hw_addr_random(ndev); dev_warn(dev, "using random MAC address %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 8b2bf85039f1..c1aae0fca5e9 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1098,7 +1098,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) struct net_device *ndev; struct hix5hd2_priv *priv; struct mii_bus *bus; - const char *mac_addr; int ret; ndev = alloc_etherdev(sizeof(struct hix5hd2_priv)); @@ -1220,10 +1219,8 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) goto out_phy_node; } - mac_addr = of_get_mac_address(node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(node, ndev->dev_addr); + if (ret) { eth_hw_addr_random(ndev); netdev_warn(ndev, "using random MAC address %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 6ab9458302e1..2b7db1c22321 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -414,10 +414,6 @@ enum hnae_media_type { * get ring bd number limit * get_pauseparam() * get tx and rx of pause frame use - * set_autoneg() - * set auto autonegotiation of pause frame use - * get_autoneg() - * get auto autonegotiation of pause frame use * set_pauseparam() * set tx and rx of pause frame use * get_coalesce_usecs() @@ -487,8 +483,6 @@ struct hnae_ae_ops { u32 *uplimit); void (*get_pauseparam)(struct hnae_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en); - int (*set_autoneg)(struct hnae_handle *handle, u8 enable); - int (*get_autoneg)(struct hnae_handle *handle); int (*set_pauseparam)(struct hnae_handle *handle, u32 auto_neg, u32 rx_en, u32 tx_en); void (*get_coalesce_usecs)(struct hnae_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index b98244f75ab9..c615fbf9094e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -487,13 +487,6 @@ static void hns_ae_get_pauseparam(struct hnae_handle *handle, hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en); } 
-static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) -{ - assert(handle); - - return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable); -} - static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); @@ -502,17 +495,6 @@ static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) hns_mac_set_promisc(mac_cb, (u8)!!en); } -static int hns_ae_get_autoneg(struct hnae_handle *handle) -{ - u32 auto_neg; - - assert(handle); - - hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg); - - return auto_neg; -} - static int hns_ae_set_pauseparam(struct hnae_handle *handle, u32 autoneg, u32 rx_en, u32 tx_en) { @@ -648,7 +630,7 @@ static void hns_ae_update_stats(struct hnae_handle *handle, struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0; u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0; - u64 rx_missed_errors = 0; + u64 rx_missed_errors; dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); if (!dsaf_dev) @@ -965,8 +947,6 @@ static struct hnae_ae_ops hns_dsaf_ops = { .set_loopback = hns_ae_config_loopback, .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, .get_pauseparam = hns_ae_get_pauseparam, - .set_autoneg = hns_ae_set_autoneg, - .get_autoneg = hns_ae_get_autoneg, .set_pauseparam = hns_ae_set_pauseparam, .get_coalesce_usecs = hns_ae_get_coalesce_usecs, .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 7fb7a419607d..f387a859a201 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -130,14 +130,6 @@ static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval) GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S); } -static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG, - GMAC_PAUSE_EN_RX_FDFC_B, !!newval); -} - static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; @@ -179,14 +171,6 @@ static void hns_gmac_tx_loop_pkt_dis(void *mac_drv) dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri); } -static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG, - GMAC_DUPLEX_TYPE_B, !!newval); -} - static void hns_gmac_get_duplex_type(void *mac_drv, enum hns_gmac_duplex_mdoe *duplex_mode) { @@ -687,17 +671,14 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data) static void hns_gmac_get_strings(u32 stringset, u8 *data) { - char *buff = (char *)data; + u8 *buff = data; u32 i; if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { - snprintf(buff, ETH_GSTRING_LEN, "%s", - g_gmac_stats_string[i].desc); - buff = buff + ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) + ethtool_sprintf(&buff, g_gmac_stats_string[i].desc); } static int hns_gmac_get_sset_count(int stringset) @@ -741,8 +722,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->set_an_mode = hns_gmac_config_an_mode; mac_drv->config_loopback = hns_gmac_config_loopback; mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc; - mac_drv->config_half_duplex = 
hns_gmac_set_duplex_type; - mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames; mac_drv->get_info = hns_gmac_get_info; mac_drv->autoneg_stat = hns_gmac_autoneg_stat; mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 4a448138b4ec..f4cf569a2599 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1202,7 +1202,7 @@ void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data) void hns_set_led_opt(struct hns_mac_cb *mac_cb) { - int nic_data = 0; + int nic_data; int txpkts, rxpkts; txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 3278bf471ddf..8943ffab4418 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -364,12 +364,8 @@ struct mac_driver { void (*config_max_frame_length)(void *mac_drv, u16 newval); /*config PAD and CRC enable */ void (*config_pad_and_crc)(void *mac_drv, u8 newval); - /* config duplex mode*/ - void (*config_half_duplex)(void *mac_drv, u8 newval); /*config tx pause time,if pause_time is zero,disable tx pause enable*/ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time); - /*config rx pause enable*/ - void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable); /* config rx mode for promiscuous*/ void (*set_promiscuous)(void *mac_drv, u8 enable); void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 87d3db4666df..c2a60612f503 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1613,7 +1613,7 @@ int hns_dsaf_set_mac_uc_entry( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_priv *priv = @@ -1679,7 +1679,7 @@ int hns_dsaf_rm_mac_addr( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_tbl_tcam_key mac_key; @@ -1751,7 +1751,7 @@ static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_tbl_tcam_key mask_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; @@ -1861,7 +1861,7 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, u8 in_port_num, u8 *addr) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev); @@ -1910,7 +1910,7 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct 
dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; @@ -2264,7 +2264,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) */ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) { - u32 i = 0; + u32 i; u32 j; u32 *p = data; u32 reg_tmp; @@ -2768,7 +2768,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) struct dsaf_drv_mac_single_dest_entry mask_entry; struct dsaf_drv_tbl_tcam_key temp_key, mask_key; struct dsaf_drv_soft_mac_tbl *soft_mac_entry; - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct hns_mac_cb *mac_cb; u8 addr[ETH_ALEN] = {0}; @@ -2870,7 +2870,7 @@ static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port) struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0}; struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0}; struct dsaf_drv_soft_mac_tbl *soft_mac_entry; - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; u8 addr[ETH_ALEN] = {0}; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 173d6966c1a3..325e81d30cfd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -686,7 +686,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en) obj_args[0].integer.type = ACPI_TYPE_INTEGER; obj_args[0].integer.value = mac_cb->mac_id; obj_args[1].integer.type = ACPI_TYPE_INTEGER; - obj_args[1].integer.value = !!en; + obj_args[1].integer.value = en; argv4.type = ACPI_TYPE_PACKAGE; argv4.package.count = 2; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d0f8b1fff333..ff03cafccb66 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -462,33 +462,22 @@ int hns_ppe_get_regs_count(void) */ void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data) { - char *buff = (char *)data; int index = ppe_cb->index; - - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index); + u8 *buff = data; + + ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index); + ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index); + ethtool_sprintf(&buff, 
"ppe%d_rx_drop_pkt_no_bd", index); + ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index); + ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index); + ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index); + ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index); + + ethtool_sprintf(&buff, "ppe%d_tx_bd", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index); } void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index b6c8910cf7ba..5d5dc6942232 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -929,69 +929,42 @@ int hns_rcb_get_ring_regs_count(void) */ void hns_rcb_get_strings(int stringset, u8 *data, int index) { - char *buff = (char *)data; + u8 *buff = data; if (stringset != ETH_SS_STATS) return; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index); - buff = buff + ETH_GSTRING_LEN; - 
snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index); + ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index); + + ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_bytes", index); + ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index); + ethtool_sprintf(&buff, "tx_ring%d_io_err", index); + ethtool_sprintf(&buff, "tx_ring%d_sw_err", index); + ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index); + ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index); + ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index); + + ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index); + + ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_bytes", index); + ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index); + ethtool_sprintf(&buff, "rx_ring%d_io_err", index); + ethtool_sprintf(&buff, "rx_ring%d_sw_err", index); + ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index); + ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index); + ethtool_sprintf(&buff, "rx_ring%d_len_err", index); + ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index); + ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index); + ethtool_sprintf(&buff, "rx_ring%d_l2_err", index); + ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index); } void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) @@ -1001,7 +974,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; - u32 i = 0; + u32 i; /*rcb common registers */ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG); @@ -1072,7 +1045,7 @@ void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data) u32 *regs = data; struct ring_pair_cb *ring_pair = container_of(queue, struct ring_pair_cb, q); - u32 i = 0; + u32 i; /*rcb ring registers */ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 7e3609ce112a..be52acd448f9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -267,19 +267,6 @@ static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) } /** - *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac - *@mac_drv: mac driver - *@enable:enable rx pause param - */ -static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, - XGMAC_PAUSE_CTL_RX_B, !!enable); -} - -/** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver *@enable:enable tx pause param @@ -495,7 +482,7 @@ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { - u32 i = 0; + u32 i; struct mac_driver *drv = (struct 
mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; @@ -758,16 +745,14 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) */ static void hns_xgmac_get_strings(u32 stringset, u8 *data) { - char *buff = (char *)data; + u8 *buff = data; u32 i; if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { - snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc); - buff = buff + ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) + ethtool_sprintf(&buff, g_xgmac_stats_string[i].desc); } /** @@ -814,9 +799,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->set_an_mode = NULL; mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; - mac_drv->config_half_duplex = NULL; - mac_drv->set_rx_ignore_pause_frames = - hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index c66a7a51198e..5e349c0bdecc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -872,7 +872,7 @@ out: static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int num = 0; + int num; bool rx_stopped; hns_update_rx_rate(ring); @@ -1235,7 +1235,7 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx, { int cpu; - /* Diffrent irq banlance between 16core and 32core. + /* Different irq balance between 16core and 32core. * The cpu mask set by ring index according to the ring flag * which indicate the ring is tx or rx. */ @@ -1592,7 +1592,7 @@ static void hns_disable_serdes_lb(struct net_device *ndev) * which buffer size is 4096. * 2. we set the chip serdes loopback and set rss indirection to the ring. * 3. construct 64-bytes ip broadcast packages, wait the associated rx ring - * recieving all packages and it will fetch new descriptions. + * receiving all packages and it will fetch new descriptions. * 4. recover to the original state. * *@ndev: net device @@ -1621,7 +1621,7 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev) if (!org_indir) return -ENOMEM; - /* store the orginal indirection */ + /* store the original indirection */ ops->get_rss(h, org_indir, NULL, NULL); cur_indir = kzalloc(indir_size, GFP_KERNEL); @@ -1881,7 +1881,7 @@ static void hns_nic_set_rx_mode(struct net_device *ndev) static void hns_nic_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { - int idx = 0; + int idx; u64 tx_bytes = 0; u64 rx_bytes = 0; u64 tx_pkts = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a6e3f07caf99..da48c05435ea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -17,7 +17,6 @@ #define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */ #define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */ #define HNS_LED_FC_REG 16 /* LED Function Control Reg. */ -#define HNS_LED_PC_REG 17 /* LED Polarity Control Reg. 
*/ #define HNS_LED_FORCE_ON 9 #define HNS_LED_FORCE_OFF 8 @@ -480,7 +479,7 @@ static int __lb_run_test(struct net_device *ndev, #define NIC_LB_TEST_NO_MEM_ERR 1 #define NIC_LB_TEST_TX_CNT_ERR 2 #define NIC_LB_TEST_RX_CNT_ERR 3 -#define NIC_LB_TEST_RX_PKG_ERR 4 + struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; int i, j, lc, good_cnt, ret_val = 0; @@ -895,7 +894,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; - char *buff = (char *)data; + u8 *buff = data; if (!h->dev->ops->get_strings) { netdev_err(netdev, "h->dev->ops->get_strings is null!\n"); @@ -903,74 +902,45 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } if (stringset == ETH_SS_TEST) { - if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) { - memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC], - ETH_GSTRING_LEN); - buff += ETH_GSTRING_LEN; - } - memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES], - ETH_GSTRING_LEN); - buff += ETH_GSTRING_LEN; + if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) + ethtool_sprintf(&buff, + hns_nic_test_strs[MAC_INTERNALLOOP_MAC]); + ethtool_sprintf(&buff, + hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]); if ((netdev->phydev) && (!netdev->phydev->is_c45)) - memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY], - ETH_GSTRING_LEN); + ethtool_sprintf(&buff, + hns_nic_test_strs[MAC_INTERNALLOOP_PHY]); } else { - snprintf(buff, ETH_GSTRING_LEN, "rx_packets"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_packets"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_bytes"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_bytes"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_dropped"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_dropped"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "multicast"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "collisions"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_compressed"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_compressed"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped"); - buff = buff + 
ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped"); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout"); - buff = buff + ETH_GSTRING_LEN; - - h->dev->ops->get_strings(h, stringset, (u8 *)buff); + ethtool_sprintf(&buff, "rx_packets"); + ethtool_sprintf(&buff, "tx_packets"); + ethtool_sprintf(&buff, "rx_bytes"); + ethtool_sprintf(&buff, "tx_bytes"); + ethtool_sprintf(&buff, "rx_errors"); + ethtool_sprintf(&buff, "tx_errors"); + ethtool_sprintf(&buff, "rx_dropped"); + ethtool_sprintf(&buff, "tx_dropped"); + ethtool_sprintf(&buff, "multicast"); + ethtool_sprintf(&buff, "collisions"); + ethtool_sprintf(&buff, "rx_over_errors"); + ethtool_sprintf(&buff, "rx_crc_errors"); + ethtool_sprintf(&buff, "rx_frame_errors"); + ethtool_sprintf(&buff, "rx_fifo_errors"); + ethtool_sprintf(&buff, "rx_missed_errors"); + ethtool_sprintf(&buff, "tx_aborted_errors"); + ethtool_sprintf(&buff, "tx_carrier_errors"); + ethtool_sprintf(&buff, "tx_fifo_errors"); + ethtool_sprintf(&buff, "tx_heartbeat_errors"); + ethtool_sprintf(&buff, "rx_length_errors"); + ethtool_sprintf(&buff, "tx_window_errors"); + ethtool_sprintf(&buff, "rx_compressed"); + ethtool_sprintf(&buff, "tx_compressed"); + ethtool_sprintf(&buff, "netdev_rx_dropped"); + ethtool_sprintf(&buff, "netdev_tx_dropped"); + + ethtool_sprintf(&buff, "netdev_tx_timeout"); + + h->dev->ops->get_strings(h, stringset, buff); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 33defa4c180a..a2c17af57fde 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -172,4 +172,7 @@ struct hclgevf_mbx_arq_ring { (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) #define hclge_mbx_head_ptr_move_arq(arq) \ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) + +/* PF immediately push link status to VFs when link status changed */ +#define HCLGE_MBX_PUSH_LINK_STATUS_EN BIT(0) #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e9e60a935f40..1d2189047781 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -90,6 +90,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_HW_PAD_B, HNAE3_DEV_SUPPORT_STASH_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, + HNAE3_DEV_SUPPORT_PAUSE_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -134,6 +135,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_dev_stash_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps) +#define hnae3_dev_pause_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, (hdev)->ae_dev->caps) + #define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps) @@ -470,8 +474,9 @@ struct hnae3_ae_dev { struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); - void (*flr_prepare)(struct hnae3_ae_dev *ae_dev); - void (*flr_done)(struct hnae3_ae_dev *ae_dev); + void (*reset_prepare)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); + void (*reset_done)(struct hnae3_ae_dev *ae_dev); int (*init_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); void (*uninit_client_instance)(struct hnae3_client *client, @@ -575,7 +580,7 @@ struct hnae3_ae_ops { int vector_num, struct hnae3_ring_chain_node *vr_chain); - int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); 
+ int (*reset_queue)(struct hnae3_handle *handle); u32 (*get_fw_version)(struct hnae3_handle *handle); void (*get_mdix_mode)(struct hnae3_handle *handle, u8 *tp_mdix_ctrl, u8 *tp_mdix); @@ -608,8 +613,6 @@ struct hnae3_ae_ops { struct ethtool_rxnfc *cmd); int (*del_fd_entry)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd); - void (*del_all_fd_entries)(struct hnae3_handle *handle, - bool clear_list); int (*get_fd_rule_cnt)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd); int (*get_fd_rule_info)(struct hnae3_handle *handle, @@ -649,6 +652,10 @@ struct hnae3_ae_ops { int (*del_cls_flower)(struct hnae3_handle *handle, struct flow_cls_offload *cls_flower); bool (*cls_flower_active)(struct hnae3_handle *handle); + int (*get_phy_link_ksettings)(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd); + int (*set_phy_link_ksettings)(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index dd11c57027bb..9d702bd0c7c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -362,6 +362,11 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h) dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n", test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ? "yes" : "no"); + dev_info(&h->pdev->dev, "support PAUSE: %s\n", + test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps) ? + "yes" : "no"); + dev_info(&h->pdev->dev, "support imp-controlled PHY: %s\n", + test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, caps) ? "yes" : "no"); } static void hns3_dbg_dev_specs(struct hnae3_handle *h) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index bf4302a5cf95..c21dd11baed9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -210,7 +210,6 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, * Rl defines rate of interrupts i.e. 
number of interrupts-per-second * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing */ - if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && !tqp_vector->rx_group.coal.adapt_enable) /* According to the hardware, the range of rl_reg is @@ -695,7 +694,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) } static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, - u16 *mss, u32 *type_cs_vlan_tso) + u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) { u32 l4_offset, hdr_len; union l3_hdr_info l3; @@ -751,6 +750,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, (__force __wsum)htonl(l4_paylen)); } + *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; + /* find the txbd field values */ *paylen_fdop_ol4cs = skb->len - hdr_len; hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); @@ -883,7 +884,6 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM); - } else if (skb->protocol == htons(ETH_P_IPV6)) { hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); @@ -1078,7 +1078,8 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb) } static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, struct hns3_desc *desc) + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) { u32 ol_type_vlan_len_msec = 0; u32 paylen_ol4cs = skb->len; @@ -1107,6 +1108,8 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1); } + desc_cb->send_bytes = skb->len; + if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ol4_proto, il4_proto; @@ -1142,7 +1145,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, } ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, - &type_cs_vlan_tso); + &type_cs_vlan_tso, &desc_cb->send_bytes); if (unlikely(ret < 0)) { u64_stats_update_begin(&ring->syncp); ring->stats.tx_tso_err++; @@ -1277,31 +1280,29 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, } static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, - u8 max_non_tso_bd_num) + u8 max_non_tso_bd_num, unsigned int bd_num, + unsigned int recursion_level) { +#define HNS3_MAX_RECURSION_LEVEL 24 + struct sk_buff *frag_skb; - unsigned int bd_num = 0; /* If the total len is within the max bd limit */ - if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && + !skb_has_frag_list(skb) && skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) return skb_shinfo(skb)->nr_frags + 1U; - /* The below case will always be linearized, return - * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. 
- */ - if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || - (!skb_is_gso(skb) && skb->len > - HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num)))) - return HNS3_MAX_TSO_BD_NUM + 1U; + if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) + return UINT_MAX; bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); - if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) return bd_num; skb_walk_frags(skb, frag_skb) { - bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); + bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, + bd_num, recursion_level + 1); if (bd_num > HNS3_MAX_TSO_BD_NUM) return bd_num; } @@ -1361,6 +1362,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) size[i] = skb_frag_size(&shinfo->frags[i]); } +static int hns3_skb_linearize(struct hns3_enet_ring *ring, + struct sk_buff *skb, + u8 max_non_tso_bd_num, + unsigned int bd_num) +{ + /* 'bd_num == UINT_MAX' means the skb' fraglist has a + * recursion level of over HNS3_MAX_RECURSION_LEVEL. + */ + if (bd_num == UINT_MAX) { + u64_stats_update_begin(&ring->syncp); + ring->stats.over_max_recursion++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + /* The skb->len has exceeded the hw limitation, linearization + * will not help. + */ + if (skb->len > HNS3_MAX_TSO_SIZE || + (!skb_is_gso(skb) && skb->len > + HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) { + u64_stats_update_begin(&ring->syncp); + ring->stats.hw_limitation++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + if (__skb_linearize(skb)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + return 0; +} + static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct net_device *netdev, struct sk_buff *skb) @@ -1370,7 +1408,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; unsigned int bd_num; - bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num); + bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); if (unlikely(bd_num > max_non_tso_bd_num)) { if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && !hns3_skb_need_linearized(skb, bd_size, bd_num, @@ -1379,16 +1417,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, goto out; } - if (__skb_linearize(skb)) + if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num, + bd_num)) return -ENOMEM; bd_num = hns3_tx_bd_count(skb->len); - if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || - (!skb_is_gso(skb) && - bd_num > max_non_tso_bd_num)) { - trace_hns3_over_max_bd(skb); - return -ENOMEM; - } u64_stats_update_begin(&ring->syncp); ring->stats.tx_copy++; @@ -1412,6 +1445,10 @@ out: return bd_num; } + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + return -EBUSY; } @@ -1459,6 +1496,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, struct sk_buff *skb, enum hns_desc_type type) { unsigned int size = skb_headlen(skb); + struct sk_buff *frag_skb; int i, ret, bd_num = 0; if (size) { @@ -1483,6 +1521,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, bd_num += ret; } + skb_walk_frags(skb, frag_skb) { + ret = hns3_fill_skb_to_desc(ring, frag_skb, + DESC_TYPE_FRAGLIST_SKB); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + return bd_num; } @@ -1511,16 +1558,20 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct 
hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; int pre_ntu, next_to_use_head; - struct sk_buff *frag_skb; - int bd_num = 0; bool doorbell; int ret; /* Hardware can only handle short frames above 32 bytes */ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + return NETDEV_TX_OK; } @@ -1530,15 +1581,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); if (unlikely(ret <= 0)) { if (ret == -EBUSY) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); hns3_tx_doorbell(ring, 0, true); return NETDEV_TX_BUSY; - } else if (ret == -ENOMEM) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); } hns3_rl_err(netdev, "xmit error: %d!\n", ret); @@ -1547,25 +1591,19 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; - ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], + desc_cb); if (unlikely(ret < 0)) goto fill_err; + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); - if (unlikely(ret < 0)) + if (unlikely(ret <= 0)) goto fill_err; - bd_num += ret; - - skb_walk_frags(skb, frag_skb) { - ret = hns3_fill_skb_to_desc(ring, frag_skb, - DESC_TYPE_FRAGLIST_SKB); - if (unlikely(ret < 0)) - goto fill_err; - - bd_num += ret; - } - pre_ntu = ring->next_to_use ? 
(ring->next_to_use - 1) : (ring->desc_num - 1); ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= @@ -1574,9 +1612,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) /* Complete translate all packets */ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); - doorbell = __netdev_tx_sent_queue(dev_queue, skb->len, + doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, netdev_xmit_more()); - hns3_tx_doorbell(ring, bd_num, doorbell); + hns3_tx_doorbell(ring, ret, doorbell); return NETDEV_TX_OK; @@ -1748,11 +1786,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev, tx_drop += ring->stats.tx_l4_proto_err; tx_drop += ring->stats.tx_l2l3l4_err; tx_drop += ring->stats.tx_tso_err; + tx_drop += ring->stats.over_max_recursion; + tx_drop += ring->stats.hw_limitation; tx_errors += ring->stats.sw_err_cnt; tx_errors += ring->stats.tx_vlan_err; tx_errors += ring->stats.tx_l4_proto_err; tx_errors += ring->stats.tx_l2l3l4_err; tx_errors += ring->stats.tx_tso_err; + tx_errors += ring->stats.over_max_recursion; + tx_errors += ring->stats.hw_limitation; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -2323,6 +2365,32 @@ static void hns3_shutdown(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D3hot); } +static int __maybe_unused hns3_suspend(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to suspend.\n"); + if (ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); + } + + return 0; +} + +static int __maybe_unused hns3_resume(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to resume.\n"); + if (ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); + } + + return 0; +} + static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -2381,8 +2449,8 @@ static void hns3_reset_prepare(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); dev_info(&pdev->dev, "FLR prepare\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) - ae_dev->ops->flr_prepare(ae_dev); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); } static void hns3_reset_done(struct pci_dev *pdev) @@ -2390,8 +2458,8 @@ static void hns3_reset_done(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); dev_info(&pdev->dev, "FLR done\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) - ae_dev->ops->flr_done(ae_dev); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); } static const struct pci_error_handlers hns3_err_handler = { @@ -2401,12 +2469,15 @@ static const struct pci_error_handlers hns3_err_handler = { .reset_done = hns3_reset_done, }; +static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, .shutdown = hns3_shutdown, + .driver.pm = &hns3_pm_ops, .sriov_configure = hns3_pci_sriov_configure, .err_handler = &hns3_err_handler, }; @@ -2691,8 +2762,12 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, break; desc_cb = &ring->desc_cb[ntc]; - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; + + if 
(desc_cb->type == DESC_TYPE_SKB) { + (*pkts)++; + (*bytes) += desc_cb->send_bytes; + } + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ hns3_free_buffer_detach(ring, ntc, budget); @@ -2965,7 +3040,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, HNS3_RXD_L3ID_S); l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S); - /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) && @@ -3295,7 +3369,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) if (!skb) { bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - /* Check valid BD */ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) return -ENXIO; @@ -3557,7 +3630,6 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) hns3_for_each_ring(ring, tqp_vector->rx_group) { int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, hns3_rx_skb); - if (rx_cleaned >= rx_budget) clean_complete = false; @@ -3704,7 +3776,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int ret; @@ -3736,6 +3807,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) } for (i = 0; i < priv->vector_num; i++) { + struct hnae3_ring_chain_node vector_ring_chain; + tqp_vector = &priv->tqp_vector[i]; tqp_vector->rx_group.total_bytes = 0; @@ -4024,7 +4097,6 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) hns3_buf_size2type(ring->buf_size)); hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, ring->desc_num / 8 - 1); - } else { hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, (u32)dma); @@ -4143,14 +4215,6 @@ static void hns3_uninit_phy(struct net_device *netdev) h->ae_algo->ops->mac_disconnect_phy(h); } -static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->del_all_fd_entries) - h->ae_algo->ops->del_all_fd_entries(h, clear_list); -} - static int hns3_client_start(struct hnae3_handle *handle) { if (!handle->ae_algo->ops->client_start) @@ -4337,8 +4401,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_nic_uninit_irq(priv); - hns3_del_all_fd_rules(netdev, true); - hns3_clear_all_ring(handle, true); hns3_nic_uninit_vector_data(priv); @@ -4472,11 +4534,11 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) int i, j; int ret; - for (i = 0; i < h->kinfo.num_tqps; i++) { - ret = h->ae_algo->ops->reset_queue(h, i); - if (ret) - return ret; + ret = h->ae_algo->ops->reset_queue(h); + if (ret) + return ret; + for (i = 0; i < h->kinfo.num_tqps; i++) { hns3_init_ring_hw(&priv->ring[i]); /* We need to clear tx ring here because self test will diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d069b04ee587..daa04aeb0942 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -298,7 +298,12 @@ struct hns3_desc_cb { /* priv data for the desc, e.g. 
skb when use with ip stack */ void *priv; - u32 page_offset; + + union { + u32 page_offset; /* for rx */ + u32 send_bytes; /* for tx */ + }; + u32 length; /* length of the buffer */ u16 reuse_flag; @@ -376,6 +381,8 @@ struct ring_stats { u64 tx_l4_proto_err; u64 tx_l2l3l4_err; u64 tx_tso_err; + u64 over_max_recursion; + u64 hw_limitation; }; struct { u64 rx_pkts; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index adcec4ea7cb9..b48faf769b1c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = { HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err), HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err), HNS3_TQP_STAT("tso_err", tx_tso_err), + HNS3_TQP_STAT("over_max_recursion", over_max_recursion), + HNS3_TQP_STAT("hw_limitation", hw_limitation), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -307,7 +309,7 @@ out: } /** - * hns3_nic_self_test - self test + * hns3_self_test - self test * @ndev: net device * @eth_test: test cmd * @data: test result @@ -642,6 +644,10 @@ static void hns3_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps)) + return; if (h->ae_algo->ops->get_pauseparam) h->ae_algo->ops->get_pauseparam(h, &param->autoneg, @@ -652,6 +658,10 @@ static int hns3_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps)) + return -EOPNOTSUPP; netif_dbg(h, drv, netdev, "set pauseparam: autoneg=%u, rx:%u, tx:%u\n", @@ -692,6 +702,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); const struct hnae3_ae_ops *ops; u8 module_type; u8 media_type; @@ -722,7 +733,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev, break; case HNAE3_MEDIA_TYPE_COPPER: cmd->base.port = PORT_TP; - if (!netdev->phydev) + if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) && + ops->get_phy_link_ksettings) + ops->get_phy_link_ksettings(h, cmd); + else if (!netdev->phydev) hns3_get_ksettings(h, cmd); else phy_ethtool_ksettings_get(netdev->phydev, cmd); @@ -815,6 +829,9 @@ static int hns3_set_link_ksettings(struct net_device *netdev, return -EINVAL; return phy_ethtool_ksettings_set(netdev->phydev, cmd); + } else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) && + ops->set_phy_link_ksettings) { + return ops->set_phy_link_ksettings(handle, cmd); } if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 1bd0ddfaec4d..76a482456f1f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -353,7 +353,10 @@ static void hclge_set_default_capability(struct hclge_dev *hdev) set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + if (hdev->ae_dev->dev_version == 
HNAE3_DEVICE_VERSION_V2) { + set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); + } } static void hclge_parse_capability(struct hclge_dev *hdev, @@ -363,7 +366,6 @@ static void hclge_parse_capability(struct hclge_dev *hdev, u32 caps; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B)) set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B)) @@ -378,6 +380,12 @@ static void hclge_parse_capability(struct hclge_dev *hdev, set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B)) + set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B)) + set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B)) + set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps); } static __le32 hclge_build_api_caps(void) @@ -467,6 +475,8 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev) hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); + if (hnae3_dev_phy_imp_supported(hdev)) + hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1); req->compat = cpu_to_le32(compat); return hclge_cmd_send(&hdev->hw, &desc, 1); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 057dda735492..c6fc22e29581 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -127,7 +127,7 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, - HCLGE_OPC_SERDES_LOOPBACK = 0x0315, + HCLGE_OPC_COMMON_LOOPBACK = 0x0315, HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, /* PFC/Pause commands */ @@ -243,6 +243,7 @@ enum hclge_opcode_type { HCLGE_OPC_FD_KEY_CONFIG = 0x1202, HCLGE_OPC_FD_TCAM_OP = 0x1203, HCLGE_OPC_FD_AD_OP = 0x1204, + HCLGE_OPC_FD_USER_DEF_OP = 0x1207, /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, @@ -303,6 +304,10 @@ enum hclge_opcode_type { HCLGE_PPP_CMD1_INT_CMD = 0x2101, HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, HCLGE_NCSI_INT_EN = 0x2401, + + /* PHY command */ + HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, + HCLGE_OPC_PHY_REG = 0x7026, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -384,6 +389,8 @@ enum HCLGE_CAP_BITS { HCLGE_CAP_HW_PAD_B, HCLGE_CAP_STASH_B, HCLGE_CAP_UDP_TUNNEL_CSUM_B, + HCLGE_CAP_FEC_B = 13, + HCLGE_CAP_PAUSE_B = 14, }; enum HCLGE_API_CAP_BITS { @@ -499,8 +506,6 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_RD_LEN_BYTES 16 #define HCLGE_CFG_RD_LEN_UNIT 4 -#define HCLGE_CFG_VMDQ_S 0 -#define HCLGE_CFG_VMDQ_M GENMASK(7, 0) #define HCLGE_CFG_TC_NUM_S 8 #define HCLGE_CFG_TC_NUM_M GENMASK(15, 8) #define HCLGE_CFG_TQP_DESC_N_S 16 @@ -943,10 +948,16 @@ struct hclge_reset_tqp_queue_cmd { #define HCLGE_CFG_RESET_MAC_B 3 #define HCLGE_CFG_RESET_FUNC_B 7 +#define HCLGE_CFG_RESET_RCB_B 1 struct hclge_reset_cmd { u8 mac_func_reset; u8 fun_reset_vfid; - u8 rsv[22]; + u8 fun_reset_rcb; + u8 rsv; + __le16 fun_reset_rcb_vqid_start; + __le16 fun_reset_rcb_vqid_num; + u8 fun_reset_rcb_return_status; + u8 rsv1[15]; }; #define HCLGE_PF_RESET_DONE_BIT BIT(0) @@ -958,9 +969,10 @@ struct hclge_pf_rst_done_cmd { #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) #define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) 
-#define HCLGE_CMD_SERDES_DONE_B BIT(0) -#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) -struct hclge_serdes_lb_cmd { +#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3) +#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0) +#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1) +struct hclge_common_lb_cmd { u8 mask; u8 enable; u8 result; @@ -1075,6 +1087,19 @@ struct hclge_fd_ad_config_cmd { u8 rsv2[8]; }; +#define HCLGE_FD_USER_DEF_OFT_S 0 +#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0) +#define HCLGE_FD_USER_DEF_EN_B 15 +struct hclge_fd_user_def_cfg_cmd { + __le16 ol2_cfg; + __le16 l2_cfg; + __le16 ol3_cfg; + __le16 l3_cfg; + __le16 ol4_cfg; + __le16 l4_cfg; + u8 rsv[12]; +}; + struct hclge_get_m7_bd_cmd { __le32 bd_num; u8 rsv[20]; @@ -1096,6 +1121,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd { #define HCLGE_LINK_EVENT_REPORT_EN_B 0 #define HCLGE_NCSI_ERROR_REPORT_EN_B 1 +#define HCLGE_PHY_IMP_EN_B 2 struct hclge_firmware_compat_cmd { __le32 compat; u8 rsv[20]; @@ -1137,6 +1163,36 @@ struct hclge_dev_specs_1_cmd { u8 rsv1[18]; }; +#define HCLGE_PHY_LINK_SETTING_BD_NUM 2 + +struct hclge_phy_link_ksetting_0_cmd { + __le32 speed; + u8 duplex; + u8 autoneg; + u8 eth_tp_mdix; + u8 eth_tp_mdix_ctrl; + u8 port; + u8 transceiver; + u8 phy_address; + u8 rsv; + __le32 supported; + __le32 advertising; + __le32 lp_advertising; +}; + +struct hclge_phy_link_ksetting_1_cmd { + u8 master_slave_cfg; + u8 master_slave_state; + u8 rsv[22]; +}; + +struct hclge_phy_reg_cmd { + __le16 reg_addr; + u8 rsv0[2]; + __le16 reg_val; + u8 rsv1[18]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 6b1d197df881..85d306459e36 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1541,18 +1541,17 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, } } -static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, - const char *cmd_buf) +static void hclge_dbg_dump_loopback(struct hclge_dev *hdev) { struct phy_device *phydev = hdev->hw.mac.phydev; struct hclge_config_mac_mode_cmd *req_app; - struct hclge_serdes_lb_cmd *req_serdes; + struct hclge_common_lb_cmd *req_common; struct hclge_desc desc; u8 loopback_en; int ret; req_app = (struct hclge_config_mac_mode_cmd *)desc.data; - req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; + req_common = (struct hclge_common_lb_cmd *)desc.data; dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id); @@ -1569,27 +1568,33 @@ static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, dev_info(&hdev->pdev->dev, "app loopback: %s\n", loopback_en ? "on" : "off"); - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "failed to dump serdes loopback status, ret = %d\n", + "failed to dump common loopback status, ret = %d\n", ret); return; } - loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n", loopback_en ? 
"on" : "off"); - loopback_en = req_serdes->enable & + loopback_en = req_common->enable & HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n", loopback_en ? "on" : "off"); - if (phydev) + if (phydev) { dev_info(&hdev->pdev->dev, "phy loopback: %s\n", phydev->loopback_enabled ? "on" : "off"); + } else if (hnae3_dev_phy_imp_supported(hdev)) { + loopback_en = req_common->enable & + HCLGE_CMD_GE_PHY_INNER_LOOP_B; + dev_info(&hdev->pdev->dev, "phy loopback: %s\n", + loopback_en ? "on" : "off"); + } } /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt @@ -1772,7 +1777,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) hclge_dbg_dump_mac_tnl_status(hdev); } else if (strncmp(cmd_buf, DUMP_LOOPBACK, strlen(DUMP_LOOPBACK)) == 0) { - hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]); + hclge_dbg_dump_loopback(hdev); } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { hclge_dbg_dump_qs_shaper(hdev, &cmd_buf[sizeof("dump qs shaper")]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 0ca7f1b984bf..d25291916b31 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -865,13 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en) } /* configure TM QCN hw errors */ - ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0); - if (ret) { - dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret); - return ret; - } - - hclge_cmd_reuse_desc(&desc, false); + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false); if (en) desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); @@ -1497,7 +1491,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) } status = le32_to_cpu(desc[0].data[0]); - if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) { if (status & HCLGE_ROCEE_RERR_INT_MASK) dev_err(dev, "ROCEE RAS AXI rresp error\n"); @@ -1647,7 +1640,6 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) } status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); - if (status & HCLGE_RAS_REG_NFE_MASK || status & HCLGE_RAS_REG_ROCEE_ERR_MASK) ae_dev->hw_err_reset_req = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index b0dbe6dcaa7b..c296ab64fb0a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -62,7 +62,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); static void hclge_rfs_filter_expire(struct hclge_dev *hdev); -static void hclge_clear_arfs_rules(struct hnae3_handle *handle); +static int hclge_clear_arfs_rules(struct hclge_dev *hdev); static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, unsigned long *addr); static int hclge_set_default_loopback(struct hclge_dev *hdev); @@ -70,6 +70,7 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev); static void hclge_sync_mac_table(struct hclge_dev *hdev); static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev); +static void hclge_sync_fd_table(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -384,36 +385,62 
@@ static const struct key_info meta_data_key_info[] = { }; static const struct key_info tuple_key_info[] = { - { OUTER_DST_MAC, 48}, - { OUTER_SRC_MAC, 48}, - { OUTER_VLAN_TAG_FST, 16}, - { OUTER_VLAN_TAG_SEC, 16}, - { OUTER_ETH_TYPE, 16}, - { OUTER_L2_RSV, 16}, - { OUTER_IP_TOS, 8}, - { OUTER_IP_PROTO, 8}, - { OUTER_SRC_IP, 32}, - { OUTER_DST_IP, 32}, - { OUTER_L3_RSV, 16}, - { OUTER_SRC_PORT, 16}, - { OUTER_DST_PORT, 16}, - { OUTER_L4_RSV, 32}, - { OUTER_TUN_VNI, 24}, - { OUTER_TUN_FLOW_ID, 8}, - { INNER_DST_MAC, 48}, - { INNER_SRC_MAC, 48}, - { INNER_VLAN_TAG_FST, 16}, - { INNER_VLAN_TAG_SEC, 16}, - { INNER_ETH_TYPE, 16}, - { INNER_L2_RSV, 16}, - { INNER_IP_TOS, 8}, - { INNER_IP_PROTO, 8}, - { INNER_SRC_IP, 32}, - { INNER_DST_IP, 32}, - { INNER_L3_RSV, 16}, - { INNER_SRC_PORT, 16}, - { INNER_DST_PORT, 16}, - { INNER_L4_RSV, 32}, + { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, + { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, + { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, + { INNER_DST_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.dst_mac), + offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, + { INNER_SRC_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.src_mac), + offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, + { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.vlan_tag1), + offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, + { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { INNER_ETH_TYPE, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.ether_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, + { INNER_L2_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l2_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, + { INNER_IP_TOS, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_tos), + offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) }, + { INNER_IP_PROTO, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) }, + { INNER_SRC_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.src_ip), + offsetof(struct hclge_fd_rule, tuples_mask.src_ip) }, + { INNER_DST_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.dst_ip), + offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) }, + { INNER_L3_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l3_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) }, + { INNER_SRC_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.src_port), + offsetof(struct hclge_fd_rule, tuples_mask.src_port) }, + { INNER_DST_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.dst_port), + offsetof(struct hclge_fd_rule, tuples_mask.dst_port) }, + { INNER_L4_RSV, 32, KEY_OPT_LE32, + offsetof(struct hclge_fd_rule, tuples.l4_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) }, }; static 
int hclge_mac_update_stats_defective(struct hclge_dev *hdev) @@ -526,7 +553,6 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) int ret; ret = hclge_mac_query_reg_num(hdev, &desc_num); - /* The firmware supports the new statistics acquisition method */ if (!ret) ret = hclge_mac_update_stats_complete(hdev, desc_num); @@ -751,12 +777,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; - if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && - hdev->hw.mac.phydev->drv->set_loopback) { + if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) || + hnae3_dev_phy_imp_supported(hdev)) { count += 1; handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } - } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + hclge_tqps_get_sset_count(handle, stringset); @@ -1150,8 +1176,10 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac); + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -1163,8 +1191,11 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, hclge_convert_setting_kr(mac, speed_ability); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac); + + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -1193,10 +1224,13 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); } + if (hnae3_dev_pause_supported(hdev)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); + } + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); } static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) @@ -1256,9 +1290,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[0].data; /* get the configuration */ - cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_VMDQ_M, - HCLGE_CFG_VMDQ_S); cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), @@ -1475,7 +1506,7 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) "Running kdump kernel. 
Using minimal resources\n"); /* minimal queue pairs equals to the number of vports */ - hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + hdev->num_tqps = hdev->num_req_vfs + 1; hdev->num_tx_desc = HCLGE_MIN_TX_DESC; hdev->num_rx_desc = HCLGE_MIN_RX_DESC; } @@ -1490,7 +1521,6 @@ static int hclge_configure(struct hclge_dev *hdev) if (ret) return ret; - hdev->num_vmdq_vport = cfg.vmdq_vport_num; hdev->base_tqp_pid = 0; hdev->vf_rss_size_max = cfg.vf_rss_size_max; hdev->pf_rss_size_max = cfg.pf_rss_size_max; @@ -1741,7 +1771,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev) struct hclge_vport *vport = hdev->vport; u16 i, num_vport; - num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + num_vport = hdev->num_req_vfs + 1; for (i = 0; i < num_vport; i++) { int ret; @@ -1783,7 +1813,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) int ret; /* We need to alloc a vport for main NIC of PF */ - num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + num_vport = hdev->num_req_vfs + 1; if (hdev->num_tqps < num_vport) { dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", @@ -2159,7 +2189,6 @@ static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, COMPENSATE_HALF_MPS_NUM * half_mps; min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); - if (rx_priv < min_rx_priv) return false; @@ -2188,7 +2217,7 @@ static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs * @hdev: pointer to struct hclge_dev * @buf_alloc: pointer to buffer calculation data - * @return: 0: calculate sucessful, negative: fail + * @return: 0: calculate successful, negative: fail */ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) @@ -2851,15 +2880,36 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) return hclge_get_mac_link_status(hdev, link_status); } +static void hclge_push_link_status(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < pci_num_vf(hdev->pdev); i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || + vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) + continue; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret = %d\n", + i, ret); + } + } +} + static void hclge_update_link_status(struct hclge_dev *hdev) { + struct hnae3_handle *rhandle = &hdev->vport[0].roce; + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; - struct hnae3_handle *rhandle; - struct hnae3_handle *handle; int state; int ret; - int i; if (!client) return; @@ -2874,25 +2924,24 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } if (state != hdev->hw.mac.link) { - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - handle = &hdev->vport[i].nic; - client->ops->link_status_change(handle, state); - hclge_config_mac_tnl_int(hdev, state); - rhandle = &hdev->vport[i].roce; - if (rclient && rclient->ops->link_status_change) - rclient->ops->link_status_change(rhandle, - state); - } + client->ops->link_status_change(handle, state); + hclge_config_mac_tnl_int(hdev, state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, state); + hdev->hw.mac.link = state; + 
hclge_push_link_status(hdev); } clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); } -static void hclge_update_port_capability(struct hclge_mac *mac) +static void hclge_update_port_capability(struct hclge_dev *hdev, + struct hclge_mac *mac) { - /* update fec ability by speed */ - hclge_convert_setting_fec(mac); + if (hnae3_dev_fec_supported(hdev)) + /* update fec ability by speed */ + hclge_convert_setting_fec(mac); /* firmware can not identify back plane type, the media type * read from configuration can help deal it @@ -2984,6 +3033,141 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) return 0; } +static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + u32 supported, advertising, lp_advertising; + struct hclge_dev *hdev = vport->back; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + true); + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get phy link ksetting, ret = %d.\n", ret); + return ret; + } + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + cmd->base.autoneg = req0->autoneg; + cmd->base.speed = le32_to_cpu(req0->speed); + cmd->base.duplex = req0->duplex; + cmd->base.port = req0->port; + cmd->base.transceiver = req0->transceiver; + cmd->base.phy_address = req0->phy_address; + cmd->base.eth_tp_mdix = req0->eth_tp_mdix; + cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; + supported = le32_to_cpu(req0->supported); + advertising = le32_to_cpu(req0->advertising); + lp_advertising = le32_to_cpu(req0->lp_advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + cmd->base.master_slave_cfg = req1->master_slave_cfg; + cmd->base.master_slave_state = req1->master_slave_state; + + return 0; +} + +static int +hclge_set_phy_link_ksettings(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + struct hclge_dev *hdev = vport->back; + u32 advertising; + int ret; + + if (cmd->base.autoneg == AUTONEG_DISABLE && + ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + false); + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + req0->autoneg = cmd->base.autoneg; + req0->speed = cpu_to_le32(cmd->base.speed); + req0->duplex = cmd->base.duplex; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + 
req0->advertising = cpu_to_le32(advertising); + req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + req1->master_slave_cfg = cmd->base.master_slave_cfg; + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set phy link ksettings, ret = %d.\n", ret); + return ret; + } + + hdev->hw.mac.autoneg = cmd->base.autoneg; + hdev->hw.mac.speed = cmd->base.speed; + hdev->hw.mac.duplex = cmd->base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); + + return 0; +} + +static int hclge_update_tp_port_info(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + int ret; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); + if (ret) + return ret; + + hdev->hw.mac.autoneg = cmd.base.autoneg; + hdev->hw.mac.speed = cmd.base.speed; + hdev->hw.mac.duplex = cmd.base.duplex; + + return 0; +} + +static int hclge_tp_port_init(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + cmd.base.autoneg = hdev->hw.mac.autoneg; + cmd.base.speed = hdev->hw.mac.speed; + cmd.base.duplex = hdev->hw.mac.duplex; + linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); + + return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); +} + static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; @@ -2992,7 +3176,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev) /* get the port info from SFP cmd if not copper port */ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) - return 0; + return hclge_update_tp_port_info(hdev); /* if IMP does not support get SFP/qSFP info, return directly */ if (!hdev->support_sfp_query) @@ -3012,7 +3196,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { - hclge_update_port_capability(mac); + hclge_update_port_capability(hdev, mac); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -3085,14 +3269,24 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + int link_state_old; + int ret; vport = hclge_get_vf_vport(hdev, vf); if (!vport) return -EINVAL; + link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; - return 0; + ret = hclge_push_vf_link_status(vport); + if (ret) { + vport->vf_info.link_state = link_state_old; + dev_err(&hdev->pdev->dev, + "failed to push vf%d link status, ret = %d\n", vf, ret); + } + + return ret; } static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) @@ -3197,7 +3391,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) * caused this event. Therefore, we will do below for now: * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we * have defered type of reset to be used. - * 2. Schedule the reset serivce task. + * 2. Schedule the reset service task. * 3. When service task receives HNAE3_UNKNOWN_RESET type it * will fetch the correct type of reset. This would be done * by first decoding the types of errors. 
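Editor's note: the hclge_set_phy_link_ksettings hunk above only accepts a forced link mode of 10 or 100 Mb/s at half or full duplex when autoneg is disabled; anything else is rejected with -EINVAL before the two-descriptor firmware command is built. A minimal standalone sketch of that validation follows; the helper name and the user-space form are illustrative only, the driver performs the check inline:

/* Sketch of the forced-mode check: with autoneg disabled, only
 * 10/100 Mb/s at half or full duplex are considered valid.
 * The constants mirror the values defined in linux/ethtool.h.
 */
#include <stdbool.h>
#include <stdio.h>

#define AUTONEG_DISABLE	0x00
#define SPEED_10	10
#define SPEED_100	100
#define DUPLEX_HALF	0x00
#define DUPLEX_FULL	0x01

static bool forced_phy_mode_is_valid(int autoneg, int speed, int duplex)
{
	if (autoneg != AUTONEG_DISABLE)
		return true;	/* autoneg on: the advertising mask decides */

	return (speed == SPEED_10 || speed == SPEED_100) &&
	       (duplex == DUPLEX_HALF || duplex == DUPLEX_FULL);
}

int main(void)
{
	/* forcing 1000/full is rejected (0), forcing 100/full is accepted (1) */
	printf("%d %d\n",
	       forced_phy_mode_is_valid(AUTONEG_DISABLE, 1000, DUPLEX_FULL),
	       forced_phy_mode_is_valid(AUTONEG_DISABLE, SPEED_100, DUPLEX_FULL));
	return 0;
}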
@@ -3325,8 +3519,9 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev) int hclge_notify_client(struct hclge_dev *hdev, enum hnae3_reset_notify_type type) { + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hnae3_client *client = hdev->nic_client; - u16 i; + int ret; if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) return 0; @@ -3334,27 +3529,20 @@ int hclge_notify_client(struct hclge_dev *hdev, if (!client->ops->reset_notify) return -EOPNOTSUPP; - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - struct hnae3_handle *handle = &hdev->vport[i].nic; - int ret; - - ret = client->ops->reset_notify(handle, type); - if (ret) { - dev_err(&hdev->pdev->dev, - "notify nic client failed %d(%d)\n", type, ret); - return ret; - } - } + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); - return 0; + return ret; } static int hclge_notify_roce_client(struct hclge_dev *hdev, enum hnae3_reset_notify_type type) { + struct hnae3_handle *handle = &hdev->vport[0].roce; struct hnae3_client *client = hdev->roce_client; int ret; - u16 i; if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) return 0; @@ -3362,17 +3550,10 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev, if (!client->ops->reset_notify) return -EOPNOTSUPP; - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - struct hnae3_handle *handle = &hdev->vport[i].roce; - - ret = client->ops->reset_notify(handle, type); - if (ret) { - dev_err(&hdev->pdev->dev, - "notify roce client failed %d(%d)", - type, ret); - return ret; - } - } + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", + type, ret); return ret; } @@ -3440,7 +3621,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) { int i; - for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { + for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { struct hclge_vport *vport = &hdev->vport[i]; int ret; @@ -3521,14 +3702,12 @@ void hclge_report_hw_error(struct hclge_dev *hdev, enum hnae3_hw_error_type type) { struct hnae3_client *client = hdev->nic_client; - u16 i; if (!client || !client->ops->process_hw_error || !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) return; - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) - client->ops->process_hw_error(&hdev->vport[i].nic, type); + client->ops->process_hw_error(&hdev->vport[0].nic, type); } static void hclge_handle_imp_error(struct hclge_dev *hdev) @@ -3794,6 +3973,21 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) return false; } +static void hclge_update_reset_level(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + enum hnae3_reset_type reset_level; + + /* if default_reset_request has a higher level reset request, + * it should be handled as soon as possible. since some errors + * need this kind of reset to fix. 
+ */ + reset_level = hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + if (reset_level != HNAE3_NONE_RESET) + set_bit(reset_level, &hdev->reset_request); +} + static int hclge_set_rst_done(struct hclge_dev *hdev) { struct hclge_pf_rst_done_cmd *req; @@ -3881,8 +4075,6 @@ static int hclge_reset_prepare(struct hclge_dev *hdev) static int hclge_reset_rebuild(struct hclge_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - enum hnae3_reset_type reset_level; int ret; hdev->rst_stats.hw_reset_done_cnt++; @@ -3926,14 +4118,7 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) hdev->rst_stats.reset_done_cnt++; clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); - /* if default_reset_request has a higher level reset request, - * it should be handled as soon as possible. since some errors - * need this kind of reset to fix. - */ - reset_level = hclge_get_reset_level(ae_dev, - &hdev->default_reset_request); - if (reset_level != HNAE3_NONE_RESET) - set_bit(reset_level, &hdev->reset_request); + hclge_update_reset_level(hdev); return 0; } @@ -4094,6 +4279,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev) hclge_update_link_status(hdev); hclge_sync_mac_table(hdev); hclge_sync_promisc_mode(hdev); + hclge_sync_fd_table(hdev); if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { delta = jiffies - hdev->last_serv_processed; @@ -4738,58 +4924,44 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) { - struct hclge_vport *vport = hdev->vport; - int i, j; + struct hclge_vport *vport = &hdev->vport[0]; + int i; - for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { - for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) - vport[j].rss_indirection_tbl[i] = - i % vport[j].alloc_rss_size; - } + for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) + vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size; } static int hclge_rss_init_cfg(struct hclge_dev *hdev) { u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; - int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - struct hclge_vport *vport = hdev->vport; + int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + struct hclge_vport *vport = &hdev->vport[0]; + u16 *rss_ind_tbl; if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - u16 *rss_ind_tbl; - - vport[i].rss_tuple_sets.ipv4_tcp_en = - HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_tuple_sets.ipv4_udp_en = - HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_tuple_sets.ipv4_sctp_en = - HCLGE_RSS_INPUT_TUPLE_SCTP; - vport[i].rss_tuple_sets.ipv4_fragment_en = - HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_tuple_sets.ipv6_tcp_en = - HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_tuple_sets.ipv6_udp_en = - HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_tuple_sets.ipv6_sctp_en = - hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT : - HCLGE_RSS_INPUT_TUPLE_SCTP; - vport[i].rss_tuple_sets.ipv6_fragment_en = - HCLGE_RSS_INPUT_TUPLE_OTHER; - - vport[i].rss_algo = rss_algo; - - rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, - sizeof(*rss_ind_tbl), GFP_KERNEL); - if (!rss_ind_tbl) - return -ENOMEM; + vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + vport->rss_tuple_sets.ipv6_sctp_en = + hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? + HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT : + HCLGE_RSS_INPUT_TUPLE_SCTP; + vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + + vport->rss_algo = rss_algo; + + rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, + sizeof(*rss_ind_tbl), GFP_KERNEL); + if (!rss_ind_tbl) + return -ENOMEM; - vport[i].rss_indirection_tbl = rss_ind_tbl; - memcpy(vport[i].rss_hash_key, hclge_hash_key, - HCLGE_RSS_KEY_SIZE); - } + vport->rss_indirection_tbl = rss_ind_tbl; + memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE); hclge_rss_indir_init_cfg(hdev); @@ -4995,6 +5167,285 @@ static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); } +static void hclge_sync_fd_state(struct hclge_dev *hdev) +{ + if (hlist_empty(&hdev->fd_rule_list)) + hdev->fd_active_type = HCLGE_FD_RULE_NONE; +} + +static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (!test_bit(location, hdev->fd_bmap)) { + set_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num++; + } +} + +static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (test_bit(location, hdev->fd_bmap)) { + clear_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num--; + } +} + +static void hclge_fd_free_node(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + hlist_del(&rule->rule_node); + kfree(rule); + hclge_sync_fd_state(hdev); +} + +static void hclge_update_fd_rule_node(struct hclge_dev *hdev, + struct hclge_fd_rule *old_rule, + struct hclge_fd_rule *new_rule, + enum HCLGE_FD_NODE_STATE state) +{ + switch (state) { + case HCLGE_FD_TO_ADD: + case HCLGE_FD_ACTIVE: + /* 1) if the new state is TO_ADD, just replace the old rule + * with the same location, no matter its state, because the + * new rule will be configured to the hardware. + * 2) if the new state is ACTIVE, it means the new rule + * has been configured to the hardware, so just replace + * the old rule node with the same location. + * 3) for it doesn't add a new node to the list, so it's + * unnecessary to update the rule number and fd_bmap. + */ + new_rule->rule_node.next = old_rule->rule_node.next; + new_rule->rule_node.pprev = old_rule->rule_node.pprev; + memcpy(old_rule, new_rule, sizeof(*old_rule)); + kfree(new_rule); + break; + case HCLGE_FD_DELETED: + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + break; + case HCLGE_FD_TO_DEL: + /* if new request is TO_DEL, and old rule is existent + * 1) the state of old rule is TO_DEL, we need do nothing, + * because we delete rule by location, other rule content + * is unncessary. 
+ * 2) the state of old rule is ACTIVE, we need to change its + * state to TO_DEL, so the rule will be deleted when periodic + * task being scheduled. + * 3) the state of old rule is TO_ADD, it means the rule hasn't + * been added to hardware, so we just delete the rule node from + * fd_rule_list directly. + */ + if (old_rule->state == HCLGE_FD_TO_ADD) { + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + return; + } + old_rule->state = HCLGE_FD_TO_DEL; + break; + } +} + +static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, + u16 location, + struct hclge_fd_rule **parent) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + /* record the parent node, use to keep the nodes in fd_rule_list + * in ascend order. + */ + *parent = rule; + } + + return NULL; +} + +/* insert fd rule node in ascend order according to rule->location */ +static void hclge_fd_insert_rule_node(struct hlist_head *hlist, + struct hclge_fd_rule *rule, + struct hclge_fd_rule *parent) +{ + INIT_HLIST_NODE(&rule->rule_node); + + if (parent) + hlist_add_behind(&rule->rule_node, &parent->rule_node); + else + hlist_add_head(&rule->rule_node, hlist); +} + +static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, + struct hclge_fd_user_def_cfg *cfg) +{ + struct hclge_fd_user_def_cfg_cmd *req; + struct hclge_desc desc; + u16 data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); + + req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; + + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); + req->ol2_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); + req->ol3_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); + req->ol4_cfg = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set fd user def data, ret= %d\n", ret); + return ret; +} + +static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) +{ + int ret; + + if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) + return; + + if (!locked) + spin_lock_bh(&hdev->fd_rule_lock); + + ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); + if (ret) + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + + if (!locked) + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + struct hclge_fd_user_def_info *info, *old_info; + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return 0; + + /* for valid layer is start from 1, so need minus 1 to get the cfg */ + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + info = &rule->ep.user_def; + + if (!cfg->ref_cnt || cfg->offset == info->offset) + return 0; + + if 
(cfg->ref_cnt > 1) + goto error; + + fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); + if (fd_rule) { + old_info = &fd_rule->ep.user_def; + if (info->layer == old_info->layer) + return 0; + } + +error: + dev_err(&hdev->pdev->dev, + "No available offset for layer%d fd rule, each layer only support one user def offset.\n", + info->layer + 1); + return -ENOSPC; +} + +static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) { + cfg->offset = rule->ep.user_def.offset; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } + cfg->ref_cnt++; +} + +static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) + return; + + cfg->ref_cnt--; + if (!cfg->ref_cnt) { + cfg->offset = 0; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } +} + +static void hclge_update_fd_list(struct hclge_dev *hdev, + enum HCLGE_FD_NODE_STATE state, u16 location, + struct hclge_fd_rule *new_rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + + fd_rule = hclge_find_fd_rule(hlist, location, &parent); + if (fd_rule) { + hclge_fd_dec_user_def_refcnt(hdev, fd_rule); + if (state == HCLGE_FD_ACTIVE) + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); + return; + } + + /* it's unlikely to fail here, because we have checked the rule + * exist before. 
+ */ + if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { + dev_warn(&hdev->pdev->dev, + "failed to delete fd rule %u, it's inexistent\n", + location); + return; + } + + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_fd_insert_rule_node(hlist, new_rule, parent); + hclge_fd_inc_rule_cnt(hdev, new_rule->location); + + if (state == HCLGE_FD_TO_ADD) { + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); + } +} + static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) { struct hclge_get_fd_mode_cmd *req; @@ -5073,6 +5524,17 @@ static int hclge_set_fd_key_config(struct hclge_dev *hdev, return ret; } +static void hclge_fd_disable_user_def(struct hclge_dev *hdev) +{ + struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; + + spin_lock_bh(&hdev->fd_rule_lock); + memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); + spin_unlock_bh(&hdev->fd_rule_lock); + + hclge_fd_set_user_def_cmd(hdev, cfg); +} + static int hclge_init_fd_config(struct hclge_dev *hdev) { #define LOW_2_WORDS 0x03 @@ -5113,9 +5575,12 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); /* If use max 400bit key, we can support tuples for ether type */ - if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) + if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { key_cfg->tuple_active |= BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + } /* roce_type is used to filter roce frames * dst_vport is used to specify the rule @@ -5224,96 +5689,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, struct hclge_fd_rule *rule) { + int offset, moffset, ip_offset; + enum HCLGE_FD_KEY_OPT key_opt; u16 tmp_x_s, tmp_y_s; u32 tmp_x_l, tmp_y_l; + u8 *p = (u8 *)rule; int i; - if (rule->unused_tuple & tuple_bit) + if (rule->unused_tuple & BIT(tuple_bit)) return true; - switch (tuple_bit) { - case BIT(INNER_DST_MAC): - for (i = 0; i < ETH_ALEN; i++) { - calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], - rule->tuples_mask.dst_mac[i]); - calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], - rule->tuples_mask.dst_mac[i]); - } + key_opt = tuple_key_info[tuple_bit].key_opt; + offset = tuple_key_info[tuple_bit].offset; + moffset = tuple_key_info[tuple_bit].moffset; - return true; - case BIT(INNER_SRC_MAC): - for (i = 0; i < ETH_ALEN; i++) { - calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples_mask.src_mac[i]); - calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples_mask.src_mac[i]); - } + switch (key_opt) { + case KEY_OPT_U8: + calc_x(*key_x, p[offset], p[moffset]); + calc_y(*key_y, p[offset], p[moffset]); return true; - case BIT(INNER_VLAN_TAG_FST): - calc_x(tmp_x_s, rule->tuples.vlan_tag1, - rule->tuples_mask.vlan_tag1); - calc_y(tmp_y_s, rule->tuples.vlan_tag1, - rule->tuples_mask.vlan_tag1); + case KEY_OPT_LE16: + calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); *(__le16 *)key_x = cpu_to_le16(tmp_x_s); *(__le16 *)key_y = cpu_to_le16(tmp_y_s); return true; - case BIT(INNER_ETH_TYPE): - calc_x(tmp_x_s, rule->tuples.ether_proto, - rule->tuples_mask.ether_proto); - calc_y(tmp_y_s, rule->tuples.ether_proto, - 
rule->tuples_mask.ether_proto); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); - - return true; - case BIT(INNER_IP_TOS): - calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); - calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); - - return true; - case BIT(INNER_IP_PROTO): - calc_x(*key_x, rule->tuples.ip_proto, - rule->tuples_mask.ip_proto); - calc_y(*key_y, rule->tuples.ip_proto, - rule->tuples_mask.ip_proto); - - return true; - case BIT(INNER_SRC_IP): - calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX], - rule->tuples_mask.src_ip[IPV4_INDEX]); - calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX], - rule->tuples_mask.src_ip[IPV4_INDEX]); + case KEY_OPT_LE32: + calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); *(__le32 *)key_x = cpu_to_le32(tmp_x_l); *(__le32 *)key_y = cpu_to_le32(tmp_y_l); return true; - case BIT(INNER_DST_IP): - calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX], - rule->tuples_mask.dst_ip[IPV4_INDEX]); - calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX], - rule->tuples_mask.dst_ip[IPV4_INDEX]); - *(__le32 *)key_x = cpu_to_le32(tmp_x_l); - *(__le32 *)key_y = cpu_to_le32(tmp_y_l); - - return true; - case BIT(INNER_SRC_PORT): - calc_x(tmp_x_s, rule->tuples.src_port, - rule->tuples_mask.src_port); - calc_y(tmp_y_s, rule->tuples.src_port, - rule->tuples_mask.src_port); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + case KEY_OPT_MAC: + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + } return true; - case BIT(INNER_DST_PORT): - calc_x(tmp_x_s, rule->tuples.dst_port, - rule->tuples_mask.dst_port); - calc_y(tmp_y_s, rule->tuples.dst_port, - rule->tuples_mask.dst_port); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + case KEY_OPT_IP: + ip_offset = IPV4_INDEX * sizeof(u32); + calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); return true; default: @@ -5401,12 +5827,12 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, for (i = 0 ; i < MAX_TUPLE; i++) { bool tuple_valid; - u32 check_tuple; tuple_size = tuple_key_info[i].key_length / 8; - check_tuple = key_cfg->tuple_active & BIT(i); + if (!(key_cfg->tuple_active & BIT(i))) + continue; - tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, cur_key_y, rule); if (tuple_valid) { cur_key_x += tuple_size; @@ -5537,8 +5963,7 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, if (!spec || !unused_tuple) return -EINVAL; - *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_IP_TOS); + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); /* check whether src/dst ip address used */ if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) @@ -5553,8 +5978,8 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, if (!spec->pdst) *unused_tuple |= BIT(INNER_DST_PORT); - if (spec->tclass) - return -EOPNOTSUPP; + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); return 0; } @@ -5566,7 +5991,7 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, 
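[Editor's note] The hclge_fd_convert_tuple() rewrite above drops the long per-tuple switch: tuple_key_info[] now records, for every tuple, a key_opt plus the byte offsets of the value and of its mask inside struct hclge_fd_rule, and a single generic routine reads whichever field it needs through a u8 pointer. A minimal standalone sketch of that table-driven pattern (demo_rule, demo_key_info and the field names are invented for illustration, not the driver's types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_rule {                    /* stand-in for struct hclge_fd_rule */
    uint16_t src_port;
    uint16_t src_port_mask;
    uint8_t  ip_tos;
    uint8_t  ip_tos_mask;
};

enum demo_key_opt { KEY_U8, KEY_U16 };

struct demo_key_info {                /* stand-in for tuple_key_info[] */
    enum demo_key_opt opt;
    size_t offset;                    /* offset of the value in demo_rule */
    size_t moffset;                   /* offset of the mask in demo_rule */
};

static const struct demo_key_info demo_keys[] = {
    { KEY_U16, offsetof(struct demo_rule, src_port),
               offsetof(struct demo_rule, src_port_mask) },
    { KEY_U8,  offsetof(struct demo_rule, ip_tos),
               offsetof(struct demo_rule, ip_tos_mask) },
};

int main(void)
{
    struct demo_rule rule = { 0x1f90, 0xffff, 0x2e, 0xff };
    const uint8_t *p = (const uint8_t *)&rule;
    size_t i;

    for (i = 0; i < sizeof(demo_keys) / sizeof(demo_keys[0]); i++) {
        const struct demo_key_info *k = &demo_keys[i];

        if (k->opt == KEY_U16) {
            uint16_t v, m;

            /* read value and mask generically via their byte offsets */
            memcpy(&v, p + k->offset, sizeof(v));
            memcpy(&m, p + k->moffset, sizeof(m));
            printf("tuple %zu: value 0x%04x mask 0x%04x\n", i, v, m);
        } else {
            printf("tuple %zu: value 0x%02x mask 0x%02x\n",
                   i, p[k->offset], p[k->moffset]);
        }
    }
    return 0;
}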
return -EINVAL; *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); /* check whether src/dst ip address used */ if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) @@ -5578,8 +6003,8 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, if (!spec->l4_proto) *unused_tuple |= BIT(INNER_IP_PROTO); - if (spec->tclass) - return -EOPNOTSUPP; + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); if (spec->l4_4_bytes) return -EOPNOTSUPP; @@ -5649,9 +6074,98 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, return 0; } +static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + switch (flow_type) { + case ETHER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L2; + *unused_tuple &= ~BIT(INNER_L2_RSV); + break; + case IP_USER_FLOW: + case IPV6_USER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L3; + *unused_tuple &= ~BIT(INNER_L3_RSV); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + info->layer = HCLGE_FD_USER_DEF_L4; + *unused_tuple &= ~BIT(INNER_L4_RSV); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) +{ + return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; +} + +static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + u16 data, offset, data_mask, offset_mask; + int ret; + + info->layer = HCLGE_FD_USER_DEF_NONE; + *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + + if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) + return 0; + + /* user-def data from ethtool is 64 bit value, the bit0~15 is used + * for data, and bit32~47 is used for offset. 
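[Editor's note] Per the comment above, the 64-bit user-def value supplied through ethtool carries the 16-bit match pattern in bits 0-15 and the byte offset in bits 32-47; the driver picks them out of h_ext.data[1] and h_ext.data[0]. A tiny sketch of that unpacking, assuming the value has already been assembled into a plain 64-bit integer (the example ethtool command and numbers are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical "ethtool -N ethX flow-type udp4 ... user-def 0x1200001234":
     * byte offset 0x12, 16-bit match pattern 0x1234
     */
    uint64_t user_def = 0x0000001200001234ULL;

    uint16_t pattern = user_def & 0xffff;          /* bits 0..15  */
    uint16_t offset  = (user_def >> 32) & 0xffff;  /* bits 32..47 */

    printf("match 0x%04x at byte offset %u of the chosen header layer\n",
           pattern, offset);
    return 0;
}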
+ */ + data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + + if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { + dev_err(&hdev->pdev->dev, + "user-def offset[%u] should be no more than %u\n", + offset, HCLGE_FD_MAX_USER_DEF_OFFSET); + return -EINVAL; + } + + if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { + dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); + return -EINVAL; + } + + ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); + if (ret) { + dev_err(&hdev->pdev->dev, + "unsupported flow type for user-def bytes, ret = %d\n", + ret); + return ret; + } + + info->data = data; + info->data_mask = data_mask; + info->offset = offset; + + return 0; +} + static int hclge_fd_check_spec(struct hclge_dev *hdev, struct ethtool_rx_flow_spec *fs, - u32 *unused_tuple) + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) { u32 flow_type; int ret; @@ -5664,11 +6178,9 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, return -EINVAL; } - if ((fs->flow_type & FLOW_EXT) && - (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { - dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); - return -EOPNOTSUPP; - } + ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); + if (ret) + return ret; flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); switch (flow_type) { @@ -5720,217 +6232,194 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); } -static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) +static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) { - struct hclge_fd_rule *rule = NULL; - struct hlist_node *node2; + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); - spin_lock_bh(&hdev->fd_rule_lock); - hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { - if (rule->location >= location) - break; - } + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); - spin_unlock_bh(&hdev->fd_rule_lock); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); - return rule && rule->location == location; -} + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); -/* make sure being called after lock up with fd_rule_lock */ -static int hclge_fd_update_rule_list(struct hclge_dev *hdev, - struct hclge_fd_rule *new_rule, - u16 location, - bool is_add) -{ - struct hclge_fd_rule *rule = NULL, *parent = NULL; - struct hlist_node *node2; + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; - if (is_add && !new_rule) - return -EINVAL; - - hlist_for_each_entry_safe(rule, node2, - &hdev->fd_rule_list, rule_node) { - if (rule->location >= location) - break; - parent = rule; - } 
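[Editor's note] The open-coded list walk removed below did the same job that the new hclge_find_fd_rule()/hclge_fd_insert_rule_node() pair now does: scan fd_rule_list in ascending location order, remember the last node that should stay in front, and link the new rule behind it. The same idea on an ordinary singly linked list (all names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct node {
    unsigned int location;
    struct node *next;
};

/* insert so the list stays sorted by ascending location */
static void insert_sorted(struct node **head, struct node *item)
{
    struct node *cur = *head, *parent = NULL;

    while (cur && cur->location < item->location) {
        parent = cur;                 /* last node that stays in front */
        cur = cur->next;
    }

    if (parent) {
        item->next = parent->next;    /* like hlist_add_behind() */
        parent->next = item;
    } else {
        item->next = *head;           /* like hlist_add_head() */
        *head = item;
    }
}

int main(void)
{
    struct node *head = NULL;
    unsigned int locs[] = { 5, 1, 3 };
    size_t i;

    for (i = 0; i < 3; i++) {
        struct node *n = calloc(1, sizeof(*n));

        n->location = locs[i];
        insert_sorted(&head, n);
    }

    for (struct node *n = head; n; n = n->next)
        printf("%u ", n->location);   /* prints: 1 3 5 */
    printf("\n");
    return 0;
}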
- - if (rule && rule->location == location) { - hlist_del(&rule->rule_node); - kfree(rule); - hdev->hclge_fd_rule_num--; + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; - if (!is_add) { - if (!hdev->hclge_fd_rule_num) - hdev->fd_active_type = HCLGE_FD_RULE_NONE; - clear_bit(location, hdev->fd_bmap); + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} - return 0; - } - } else if (!is_add) { - dev_err(&hdev->pdev->dev, - "delete fail, rule %u is inexistent\n", - location); - return -EINVAL; - } +static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); - INIT_HLIST_NODE(&new_rule->rule_node); + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); - if (parent) - hlist_add_behind(&new_rule->rule_node, &parent->rule_node); - else - hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; - set_bit(location, hdev->fd_bmap); - hdev->hclge_fd_rule_num++; - hdev->fd_active_type = new_rule->rule_type; + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; - return 0; + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; } -static int hclge_fd_get_tuple(struct hclge_dev *hdev, - struct ethtool_rx_flow_spec *fs, - struct hclge_fd_rule *rule) +static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) { - u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); - switch (flow_type) { - case SCTP_V4_FLOW: - case TCP_V4_FLOW: - case UDP_V4_FLOW: - rule->tuples.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); - rule->tuples_mask.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); - - rule->tuples.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); - - rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); - rule->tuples_mask.src_port = - be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); - rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); - rule->tuples_mask.dst_port = - be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); - rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; - rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); - rule->tuples.ether_proto = ETH_P_IP; - rule->tuples_mask.ether_proto = 0xFFFF; + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; - break; - case 
IP_USER_FLOW: - rule->tuples.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); - rule->tuples_mask.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; - rule->tuples.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} - rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; - rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; +static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, + IPV6_SIZE); - rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; - rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); - rule->tuples.ether_proto = ETH_P_IP; - rule->tuples_mask.ether_proto = 0xFFFF; + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; - break; - case SCTP_V6_FLOW: - case TCP_V6_FLOW: - case UDP_V6_FLOW: - be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; - be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; +} - rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); - rule->tuples_mask.src_port = - be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); +static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); - rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); - rule->tuples_mask.dst_port = - be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); - rule->tuples.ether_proto = ETH_P_IPV6; - rule->tuples_mask.ether_proto = 0xFFFF; + rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); +} +static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, + struct hclge_fd_rule *rule) +{ + switch (info->layer) { + case HCLGE_FD_USER_DEF_L2: + rule->tuples.l2_user_def = info->data; + rule->tuples_mask.l2_user_def = info->data_mask; break; - case IPV6_USER_FLOW: - be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE); - - be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.dst_ip, - 
fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE); - - rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; - rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; - - rule->tuples.ether_proto = ETH_P_IPV6; - rule->tuples_mask.ether_proto = 0xFFFF; - + case HCLGE_FD_USER_DEF_L3: + rule->tuples.l3_user_def = info->data; + rule->tuples_mask.l3_user_def = info->data_mask; break; - case ETHER_FLOW: - ether_addr_copy(rule->tuples.src_mac, - fs->h_u.ether_spec.h_source); - ether_addr_copy(rule->tuples_mask.src_mac, - fs->m_u.ether_spec.h_source); - - ether_addr_copy(rule->tuples.dst_mac, - fs->h_u.ether_spec.h_dest); - ether_addr_copy(rule->tuples_mask.dst_mac, - fs->m_u.ether_spec.h_dest); - - rule->tuples.ether_proto = - be16_to_cpu(fs->h_u.ether_spec.h_proto); - rule->tuples_mask.ether_proto = - be16_to_cpu(fs->m_u.ether_spec.h_proto); - + case HCLGE_FD_USER_DEF_L4: + rule->tuples.l4_user_def = (u32)info->data << 16; + rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; break; default: - return -EOPNOTSUPP; + break; } + rule->ep.user_def = *info; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, + struct hclge_fd_user_def_info *info) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { case SCTP_V4_FLOW: - case SCTP_V6_FLOW: - rule->tuples.ip_proto = IPPROTO_SCTP; - rule->tuples_mask.ip_proto = 0xFF; + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP); break; case TCP_V4_FLOW: - case TCP_V6_FLOW: - rule->tuples.ip_proto = IPPROTO_TCP; - rule->tuples_mask.ip_proto = 0xFF; + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP); break; case UDP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP); + break; + case IP_USER_FLOW: + hclge_fd_get_ip4_tuple(hdev, fs, rule); + break; + case SCTP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP); + break; + case TCP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP); + break; case UDP_V6_FLOW: - rule->tuples.ip_proto = IPPROTO_UDP; - rule->tuples_mask.ip_proto = 0xFF; + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP); break; - default: + case IPV6_USER_FLOW: + hclge_fd_get_ip6_tuple(hdev, fs, rule); break; + case ETHER_FLOW: + hclge_fd_get_ether_tuple(hdev, fs, rule); + break; + default: + return -EOPNOTSUPP; } if (fs->flow_type & FLOW_EXT) { rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + hclge_fd_get_user_def_tuple(info, rule); } if (fs->flow_type & FLOW_MAC_EXT) { @@ -5941,33 +6430,53 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, return 0; } -/* make sure being called after lock up with fd_rule_lock */ static int hclge_fd_config_rule(struct hclge_dev *hdev, struct hclge_fd_rule *rule) { int ret; - if (!rule) { + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + return ret; + + return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); +} + +static int hclge_add_fd_entry_common(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + if (hdev->fd_active_type != rule->rule_type && + (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { dev_err(&hdev->pdev->dev, - "The flow director rule is NULL\n"); + "mode conflict(new type %d, active type %d), please delete existent rules first\n", + rule->rule_type, hdev->fd_active_type); + spin_unlock_bh(&hdev->fd_rule_lock); return -EINVAL; 
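[Editor's note] The check above encodes the rule-type exclusivity the driver enforces: ethtool (EP) and tc-flower rules cannot be mixed and must be deleted explicitly first, while aRFS rules are simply flushed when either of the others arrives. A compact sketch of that policy as a predicate (the enum and function names are made up for the example):

#include <stdio.h>

enum fd_type { FD_NONE, FD_ARFS, FD_EP, FD_TC_FLOWER };

/* aRFS entries are soft state and get cleared before a new EP/tc-flower
 * rule is installed, so they never block the incoming rule.
 */
static int fd_rule_type_allowed(enum fd_type active, enum fd_type incoming)
{
    return active == FD_NONE || active == incoming || active == FD_ARFS;
}

int main(void)
{
    printf("tc-flower while ethtool rules active: %d\n",
           fd_rule_type_allowed(FD_EP, FD_TC_FLOWER));   /* 0: rejected */
    printf("ethtool rule while only aRFS active:  %d\n",
           fd_rule_type_allowed(FD_ARFS, FD_EP));        /* 1: allowed  */
    return 0;
}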
} - /* it will never fail here, so needn't to check return value */ - hclge_fd_update_rule_list(hdev, rule, rule->location, true); + ret = hclge_fd_check_user_def_refcnt(hdev, rule); + if (ret) + goto out; - ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + ret = hclge_clear_arfs_rules(hdev); if (ret) - goto clear_rule; + goto out; - ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + ret = hclge_fd_config_rule(hdev, rule); if (ret) - goto clear_rule; + goto out; - return 0; + rule->state = HCLGE_FD_ACTIVE; + hdev->fd_active_type = rule->rule_type; + hclge_update_fd_list(hdev, rule->state, rule->location, rule); -clear_rule: - hclge_fd_update_rule_list(hdev, rule, rule->location, false); +out: + spin_unlock_bh(&hdev->fd_rule_lock); return ret; } @@ -5979,11 +6488,48 @@ static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; } +static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, + u16 *vport_id, u8 *action, u16 *queue_id) +{ + struct hclge_vport *vport = hdev->vport; + + if (ring_cookie == RX_CLS_FLOW_DISC) { + *action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + u16 tqps; + + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%u) > max vf num (%u)\n", + vf, hdev->num_req_vfs); + return -EINVAL; + } + + *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = hdev->vport[vf].nic.kinfo.num_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%u) > max tqp num (%u)\n", + ring, tqps - 1); + return -EINVAL; + } + + *action = HCLGE_FD_ACTION_SELECT_QUEUE; + *queue_id = ring; + } + + return 0; +} + static int hclge_add_fd_entry(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct hclge_fd_user_def_info info; u16 dst_vport_id = 0, q_index = 0; struct ethtool_rx_flow_spec *fs; struct hclge_fd_rule *rule; @@ -6003,51 +6549,22 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EOPNOTSUPP; } - if (hclge_is_cls_flower_active(handle)) { - dev_err(&hdev->pdev->dev, - "please delete all exist cls flower rules first\n"); - return -EINVAL; - } - fs = (struct ethtool_rx_flow_spec *)&cmd->fs; - ret = hclge_fd_check_spec(hdev, fs, &unused); + ret = hclge_fd_check_spec(hdev, fs, &unused, &info); if (ret) return ret; - if (fs->ring_cookie == RX_CLS_FLOW_DISC) { - action = HCLGE_FD_ACTION_DROP_PACKET; - } else { - u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); - u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); - u16 tqps; - - if (vf > hdev->num_req_vfs) { - dev_err(&hdev->pdev->dev, - "Error: vf id (%u) > max vf num (%u)\n", - vf, hdev->num_req_vfs); - return -EINVAL; - } - - dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; - tqps = vf ? 
hdev->vport[vf].alloc_tqps : vport->alloc_tqps; - - if (ring >= tqps) { - dev_err(&hdev->pdev->dev, - "Error: queue id (%u) > max tqp num (%u)\n", - ring, tqps - 1); - return -EINVAL; - } - - action = HCLGE_FD_ACTION_SELECT_QUEUE; - q_index = ring; - } + ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, + &action, &q_index); + if (ret) + return ret; rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) return -ENOMEM; - ret = hclge_fd_get_tuple(hdev, fs, rule); + ret = hclge_fd_get_tuple(hdev, fs, rule, &info); if (ret) { kfree(rule); return ret; @@ -6061,15 +6578,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, rule->action = action; rule->rule_type = HCLGE_FD_EP_ACTIVE; - /* to avoid rule conflict, when user configure rule by ethtool, - * we need to clear all arfs rules - */ - spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); - - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); return ret; } @@ -6090,32 +6601,30 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) return -EINVAL; - if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || - !hclge_fd_rule_exist(hdev, fs->location)) { + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + !test_bit(fs->location, hdev->fd_bmap)) { dev_err(&hdev->pdev->dev, "Delete fail, rule %u is inexistent\n", fs->location); + spin_unlock_bh(&hdev->fd_rule_lock); return -ENOENT; } ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, NULL, false); if (ret) - return ret; + goto out; - spin_lock_bh(&hdev->fd_rule_lock); - ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false); + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); +out: spin_unlock_bh(&hdev->fd_rule_lock); - return ret; } -/* make sure being called after lock up with fd_rule_lock */ -static void hclge_del_all_fd_entries(struct hnae3_handle *handle, - bool clear_list) +static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, + bool clear_list) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; struct hlist_node *node; u16 location; @@ -6123,6 +6632,8 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; + spin_lock_bh(&hdev->fd_rule_lock); + for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -6139,6 +6650,14 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_del_all_fd_entries(struct hclge_dev *hdev) +{ + hclge_clear_fd_rules_in_list(hdev, true); + hclge_fd_disable_user_def(hdev); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -6147,7 +6666,6 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; struct hlist_node *node; - int ret; /* Return ok here, because reset error handling will check this * return value. 
If error is returned here, the reset process will @@ -6162,25 +6680,11 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) spin_lock_bh(&hdev->fd_rule_lock); hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { - ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); - if (!ret) - ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); - - if (ret) { - dev_warn(&hdev->pdev->dev, - "Restore rule %u failed, remove it\n", - rule->location); - clear_bit(rule->location, hdev->fd_bmap); - hlist_del(&rule->rule_node); - kfree(rule); - hdev->hclge_fd_rule_num--; - } + if (rule->state == HCLGE_FD_ACTIVE) + rule->state = HCLGE_FD_TO_ADD; } - - if (hdev->hclge_fd_rule_num) - hdev->fd_active_type = HCLGE_FD_EP_ACTIVE; - spin_unlock_bh(&hdev->fd_rule_lock); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); return 0; } @@ -6268,6 +6772,10 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, IPV6_SIZE); + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + spec->psrc = cpu_to_be16(rule->tuples.src_port); spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 0 : cpu_to_be16(rule->tuples_mask.src_port); @@ -6295,6 +6803,10 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, IPV6_SIZE); + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + spec->l4_proto = rule->tuples.ip_proto; spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 0 : rule->tuples_mask.ip_proto; @@ -6322,6 +6834,24 @@ static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, 0 : cpu_to_be16(rule->tuples_mask.ether_proto); } +static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == + HCLGE_FD_TUPLE_USER_DEF_TUPLES) { + fs->h_ext.data[0] = 0; + fs->h_ext.data[1] = 0; + fs->m_ext.data[0] = 0; + fs->m_ext.data[1] = 0; + } else { + fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); + fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); + fs->m_ext.data[0] = + cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); + fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); + } +} + static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, struct hclge_fd_rule *rule) { @@ -6330,6 +6860,8 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, fs->m_ext.vlan_tci = rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); + + hclge_fd_get_user_def_info(fs, rule); } if (fs->flow_type & FLOW_MAC_EXT) { @@ -6441,6 +6973,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle, return -EMSGSIZE; } + if (rule->state == HCLGE_FD_TO_DEL) + continue; + rule_locs[cnt] = rule->location; cnt++; } @@ -6500,6 +7035,7 @@ static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, rule->action = 0; rule->vf_id = 0; rule->rule_type = HCLGE_FD_ARFS_ACTIVE; + rule->state = HCLGE_FD_TO_ADD; if (tuples->ether_proto == ETH_P_IP) { if (tuples->ip_proto == IPPROTO_TCP) rule->flow_type = TCP_V4_FLOW; @@ -6522,9 +7058,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; - u16 tmp_queue_id; u16 bit_id; - int ret; if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; @@ -6560,34 +7094,19 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, return -ENOMEM; } - set_bit(bit_id, hdev->fd_bmap); rule->location = bit_id; rule->arfs.flow_id = flow_id; rule->queue_id = queue_id; hclge_fd_build_arfs_rule(&new_tuples, rule); - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); - - if (ret) - return ret; - - return rule->location; + hclge_update_fd_list(hdev, rule->state, rule->location, rule); + hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; + } else if (rule->queue_id != queue_id) { + rule->queue_id = queue_id; + rule->state = HCLGE_FD_TO_ADD; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); } - spin_unlock_bh(&hdev->fd_rule_lock); - - if (rule->queue_id == queue_id) - return rule->location; - - tmp_queue_id = rule->queue_id; - rule->queue_id = queue_id; - ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); - if (ret) { - rule->queue_id = tmp_queue_id; - return ret; - } - return rule->location; } @@ -6597,7 +7116,6 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_fd_rule *rule; struct hlist_node *node; - HLIST_HEAD(del_list); spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { @@ -6605,34 +7123,51 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) return; } hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->state != HCLGE_FD_ACTIVE) + continue; if (rps_may_expire_flow(handle->netdev, rule->queue_id, rule->arfs.flow_id, rule->location)) { - hlist_del_init(&rule->rule_node); - hlist_add_head(&rule->rule_node, &del_list); - hdev->hclge_fd_rule_num--; - clear_bit(rule->location, hdev->fd_bmap); + rule->state = HCLGE_FD_TO_DEL; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); } } spin_unlock_bh(&hdev->fd_rule_lock); - - hlist_for_each_entry_safe(rule, node, &del_list, rule_node) { - hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, - rule->location, NULL, false); - kfree(rule); - } #endif } /* make sure being called after lock up with fd_rule_lock */ -static void hclge_clear_arfs_rules(struct hnae3_handle *handle) +static int hclge_clear_arfs_rules(struct hclge_dev *hdev) { #ifdef CONFIG_RFS_ACCEL - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) + return 0; + + hlist_for_each_entry_safe(rule, node, 
&hdev->fd_rule_list, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_DEL: + case HCLGE_FD_ACTIVE: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + return ret; + fallthrough; + case HCLGE_FD_TO_ADD: + hclge_fd_dec_rule_cnt(hdev, rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + break; + default: + break; + } + } + hclge_sync_fd_state(hdev); - if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) - hclge_del_all_fd_entries(handle, true); #endif + return 0; } static void hclge_get_cls_key_basic(const struct flow_rule *flow, @@ -6814,12 +7349,6 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle, struct hclge_fd_rule *rule; int ret; - if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { - dev_err(&hdev->pdev->dev, - "please remove all exist fd rules via ethtool first\n"); - return -EINVAL; - } - ret = hclge_check_cls_flower(hdev, cls_flower, tc); if (ret) { dev_err(&hdev->pdev->dev, @@ -6832,8 +7361,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle, return -ENOMEM; ret = hclge_parse_cls_flower(hdev, cls_flower, rule); - if (ret) - goto err; + if (ret) { + kfree(rule); + return ret; + } rule->action = HCLGE_FD_ACTION_SELECT_TC; rule->cls_flower.tc = tc; @@ -6842,22 +7373,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle, rule->cls_flower.cookie = cls_flower->cookie; rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; - spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); - - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); - - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to add cls flower rule, ret = %d\n", ret); - goto err; - } + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); - return 0; -err: - kfree(rule); return ret; } @@ -6894,25 +7413,66 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle, ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, NULL, false); if (ret) { - dev_err(&hdev->pdev->dev, - "failed to delete cls flower rule %u, ret = %d\n", - rule->location, ret); spin_unlock_bh(&hdev->fd_rule_lock); return ret; } - ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to delete cls flower rule %u in list, ret = %d\n", - rule->location, ret); - spin_unlock_bh(&hdev->fd_rule_lock); - return ret; + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret = 0; + + if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) + return; + + spin_lock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_ADD: + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + rule->state = HCLGE_FD_ACTIVE; + break; + case HCLGE_FD_TO_DEL: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + goto out; + hclge_fd_dec_rule_cnt(hdev, rule->location); + hclge_fd_free_node(hdev, rule); + break; + default: + break; + } } +out: + if (ret) + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + spin_unlock_bh(&hdev->fd_rule_lock); +} - return 0; +static void hclge_sync_fd_table(struct hclge_dev *hdev) +{ + if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { + bool 
clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; + + hclge_clear_fd_rules_in_list(hdev, clear_list); + } + + hclge_sync_fd_user_def_cfg(hdev, false); + + hclge_sync_fd_list(hdev, &hdev->fd_rule_list); } static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) @@ -6952,18 +7512,15 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - bool clear; hdev->fd_en = enable; - clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) { - spin_lock_bh(&hdev->fd_rule_lock); - hclge_del_all_fd_entries(handle, clear); - spin_unlock_bh(&hdev->fd_rule_lock); - } else { + if (!enable) + set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); + else hclge_restore_fd_entries(handle); - } + + hclge_task_schedule(hdev, 0); } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -7124,19 +7681,19 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, enum hnae3_loop loop_mode) { -#define HCLGE_SERDES_RETRY_MS 10 -#define HCLGE_SERDES_RETRY_NUM 100 +#define HCLGE_COMMON_LB_RETRY_MS 10 +#define HCLGE_COMMON_LB_RETRY_NUM 100 - struct hclge_serdes_lb_cmd *req; + struct hclge_common_lb_cmd *req; struct hclge_desc desc; int ret, i = 0; u8 loop_mode_b; - req = (struct hclge_serdes_lb_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + req = (struct hclge_common_lb_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); switch (loop_mode) { case HNAE3_LOOP_SERIAL_SERDES: @@ -7145,9 +7702,12 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, case HNAE3_LOOP_PARALLEL_SERDES: loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; break; + case HNAE3_LOOP_PHY: + loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; + break; default: dev_err(&hdev->pdev->dev, - "unsupported serdes loopback mode %d\n", loop_mode); + "unsupported common loopback mode %d\n", loop_mode); return -ENOTSUPP; } @@ -7161,39 +7721,39 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "serdes loopback set fail, ret = %d\n", ret); + "common loopback set fail, ret = %d\n", ret); return ret; } do { - msleep(HCLGE_SERDES_RETRY_MS); - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, + msleep(HCLGE_COMMON_LB_RETRY_MS); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "serdes loopback get, ret = %d\n", ret); + "common loopback get, ret = %d\n", ret); return ret; } - } while (++i < HCLGE_SERDES_RETRY_NUM && - !(req->result & HCLGE_CMD_SERDES_DONE_B)); + } while (++i < HCLGE_COMMON_LB_RETRY_NUM && + !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); - if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { - dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); + if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { + dev_err(&hdev->pdev->dev, "common loopback set timeout\n"); return -EBUSY; - } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { - dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); + } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { + dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n"); return -EIO; 
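[Editor's note] hclge_cfg_common_loopback() above polls the command result up to HCLGE_COMMON_LB_RETRY_NUM (100) times at HCLGE_COMMON_LB_RETRY_MS (10 ms) intervals and then distinguishes a timeout (-EBUSY) from a completed-but-failed reply (-EIO). A small userspace sketch of the same poll-and-classify pattern, with the firmware query stubbed out (all names invented):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_MS    10
#define RETRY_NUM   100

#define DONE_BIT    0x1
#define SUCCESS_BIT 0x2

/* stub for the firmware query; a real driver reads a command reply */
static unsigned int query_result(void)
{
    static int calls;

    return ++calls < 3 ? 0 : (DONE_BIT | SUCCESS_BIT);
}

static int wait_loopback_done(void)
{
    unsigned int result = 0;
    int i = 0;

    do {
        usleep(RETRY_MS * 1000);
        result = query_result();
    } while (++i < RETRY_NUM && !(result & DONE_BIT));

    if (!(result & DONE_BIT))
        return -EBUSY;            /* timed out waiting for completion */
    if (!(result & SUCCESS_BIT))
        return -EIO;              /* completed, but failed in firmware */
    return 0;
}

int main(void)
{
    printf("wait_loopback_done() = %d\n", wait_loopback_done());
    return 0;
}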
} return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, enum hnae3_loop loop_mode) { int ret; - ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode); + ret = hclge_cfg_common_loopback(hdev, en, loop_mode); if (ret) return ret; @@ -7242,8 +7802,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) struct phy_device *phydev = hdev->hw.mac.phydev; int ret; - if (!phydev) + if (!phydev) { + if (hnae3_dev_phy_imp_supported(hdev)) + return hclge_set_common_loopback(hdev, en, + HNAE3_LOOP_PHY); return -ENOTSUPP; + } if (en) ret = hclge_enable_phy_loopback(hdev, phydev); @@ -7265,13 +7829,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id, - int stream_id, bool enable) +static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) { struct hclge_desc desc; struct hclge_cfg_com_tqp_queue_cmd *req = (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; - int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); req->tqp_id = cpu_to_le16(tqp_id); @@ -7279,20 +7842,30 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id, if (enable) req->enable |= 1U << HCLGE_TQP_ENABLE_B; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Tqp enable fail, status =%d.\n", ret); - return ret; + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } + return 0; } static int hclge_set_loopback(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hnae3_knic_private_info *kinfo; struct hclge_dev *hdev = vport->back; - int i, ret; + int ret; /* Loopback can be enabled in three places: SSU, MAC, and serdes. By * default, SSU loopback is enabled, so if the SMAC and the DMAC are @@ -7314,7 +7887,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, break; case HNAE3_LOOP_SERIAL_SERDES: case HNAE3_LOOP_PARALLEL_SERDES: - ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + ret = hclge_set_common_loopback(hdev, en, loop_mode); break; case HNAE3_LOOP_PHY: ret = hclge_set_phy_loopback(hdev, en); @@ -7329,14 +7902,12 @@ static int hclge_set_loopback(struct hnae3_handle *handle, if (ret) return ret; - kinfo = &vport->nic.kinfo; - for (i = 0; i < kinfo->num_tqps; i++) { - ret = hclge_tqp_enable(hdev, i, 0, en); - if (ret) - return ret; - } + ret = hclge_tqp_enable(handle, en); + if (ret) + dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", + en ? 
"enable" : "disable", ret); - return 0; + return ret; } static int hclge_set_default_loopback(struct hclge_dev *hdev) @@ -7347,11 +7918,11 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); + ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); if (ret) return ret; - return hclge_cfg_serdes_loopback(hdev, false, + return hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_PARALLEL_SERDES); } @@ -7423,11 +7994,10 @@ static void hclge_ae_stop(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); + hclge_clear_arfs_rules(hdev); spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, @@ -7440,8 +8010,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle) return; } - for (i = 0; i < handle->kinfo.num_tqps; i++) - hclge_reset_tqp(handle, i); + hclge_reset_tqp(handle); hclge_config_mac_tnl_int(hdev, false); @@ -7891,7 +8460,7 @@ int hclge_update_mac_list(struct hclge_vport *vport, /* if the mac addr is already in the mac list, no need to add a new * one into it, just check the mac addr state, convert it to a new - * new state, or just remove it, or do nothing. + * state, or just remove it, or do nothing. */ mac_node = hclge_find_mac_node(list, addr); if (mac_node) { @@ -8080,7 +8649,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, if (status) return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - /* if already overflow, not to print each time */ if (status == -ENOSPC && !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) @@ -8129,7 +8697,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, else /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); - } else if (status == -ENOENT) { status = 0; } @@ -8564,7 +9131,7 @@ static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx, return true; vf_idx += HCLGE_VF_VPORT_START_NUM; - for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) + for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) if (i != vf_idx && ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac)) return true; @@ -8758,6 +9325,29 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return 0; } +static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + + if (!hnae3_dev_phy_imp_supported(hdev)) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hdev->hw.mac.phy_addr; + /* this command reads phy id and register at the same time */ + fallthrough; + case SIOCGMIIREG: + data->val_out = hclge_read_phy_reg(hdev, data->reg_num); + return 0; + + case SIOCSMIIREG: + return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); + default: + return -EOPNOTSUPP; + } +} + static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, int cmd) { @@ -8765,7 +9355,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, struct hclge_dev *hdev = vport->back; if (!hdev->hw.mac.phydev) - return -EOPNOTSUPP; + return hclge_mii_ioctl(hdev, ifr, cmd); return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); } @@ -8922,8 +9512,7 @@ static int hclge_check_vf_vlan_cmd_status(struct hclge_dev 
*hdev, u16 vfid, } static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, - bool is_kill, u16 vlan, - __be16 proto) + bool is_kill, u16 vlan) { struct hclge_vport *vport = &hdev->vport[vfid]; struct hclge_desc desc[2]; @@ -8989,8 +9578,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, if (is_kill && !vlan_id) return 0; - ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, - proto); + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); if (ret) { dev_err(&hdev->pdev->dev, "Set %u vport vlan filter config fail, ret =%d.\n", @@ -9440,7 +10028,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev) hclge_restore_mac_table_common(vport); hclge_restore_vport_vlan_table(vport); set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); - + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); hclge_restore_fd_entries(handle); } @@ -9796,7 +10384,7 @@ out: return ret; } -static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, +static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, bool enable) { struct hclge_reset_tqp_queue_cmd *req; @@ -9852,94 +10440,114 @@ u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) return tqp->index; } -int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +static int hclge_reset_tqp_cmd(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int reset_try_times = 0; + u16 reset_try_times = 0; int reset_status; u16 queue_gid; int ret; + u16 i; - queue_gid = hclge_covert_handle_qid_global(handle, queue_id); - - ret = hclge_tqp_enable(hdev, queue_id, 0, false); - if (ret) { - dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); - return ret; - } + for (i = 0; i < handle->kinfo.num_tqps; i++) { + queue_gid = hclge_covert_handle_qid_global(handle, i); + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to send reset tqp cmd, ret = %d\n", + ret); + return ret; + } - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); - if (ret) { - dev_err(&hdev->pdev->dev, - "Send reset tqp cmd fail, ret = %d\n", ret); - return ret; - } + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + reset_status = hclge_get_reset_status(hdev, queue_gid); + if (reset_status) + break; - while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { - reset_status = hclge_get_reset_status(hdev, queue_gid); - if (reset_status) - break; + /* Wait for tqp hw reset */ + usleep_range(1000, 1200); + } - /* Wait for tqp hw reset */ - usleep_range(1000, 1200); - } + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_err(&hdev->pdev->dev, + "wait for tqp hw reset timeout\n"); + return -ETIME; + } - if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { - dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); - return ret; + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to deassert soft reset, ret = %d\n", + ret); + return ret; + } + reset_try_times = 0; } - - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); - if (ret) - dev_err(&hdev->pdev->dev, - "Deassert the soft reset fail, ret = %d\n", ret); - - return ret; + return 0; } -void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) +static int hclge_reset_rcb(struct hnae3_handle *handle) { - struct hnae3_handle *handle = &vport->nic; +#define HCLGE_RESET_RCB_NOT_SUPPORT 0U +#define HCLGE_RESET_RCB_SUCCESS 1U + 
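[Editor's note] hclge_reset_rcb() here issues one batched HCLGE_OPC_CFG_RST_TRIGGER command covering all of the handle's queues and, when the firmware answers HCLGE_RESET_RCB_NOT_SUPPORT, falls back to the legacy per-queue hclge_reset_tqp_cmd() loop. A standalone sketch of that try-the-new-command-then-fall-back shape (the stub functions below are invented for illustration):

#include <errno.h>
#include <stdio.h>

#define RCB_NOT_SUPPORT 0U
#define RCB_SUCCESS     1U

/* stub for the batched reset command; pretends to run on old firmware
 * that does not recognise it
 */
static unsigned int send_rcb_reset(unsigned int first_qid, unsigned int num)
{
    (void)first_qid;
    (void)num;
    return RCB_NOT_SUPPORT;
}

static int reset_one_queue(unsigned int qid)
{
    printf("legacy per-queue reset of tqp %u\n", qid);
    return 0;
}

static int reset_queues(unsigned int first_qid, unsigned int num)
{
    unsigned int status = send_rcb_reset(first_qid, num);
    unsigned int i;
    int ret;

    if (status == RCB_SUCCESS)
        return 0;                    /* one command covered every queue */
    if (status != RCB_NOT_SUPPORT)
        return -EIO;                 /* firmware replied with an error */

    /* old firmware: fall back to resetting each queue individually */
    for (i = 0; i < num; i++) {
        ret = reset_one_queue(first_qid + i);
        if (ret)
            return ret;
    }
    return 0;
}

int main(void)
{
    return reset_queues(0, 4);
}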
+ struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int reset_try_times = 0; - int reset_status; + struct hclge_reset_cmd *req; + struct hclge_desc desc; + u8 return_status; u16 queue_gid; int ret; - if (queue_id >= handle->kinfo.num_tqps) { - dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n", - queue_id); - return; - } + queue_gid = hclge_covert_handle_qid_global(handle, 0); - queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); + req = (struct hclge_reset_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); + req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); + req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_warn(&hdev->pdev->dev, - "Send reset tqp cmd fail, ret = %d\n", ret); - return; + dev_err(&hdev->pdev->dev, + "failed to send rcb reset cmd, ret = %d\n", ret); + return ret; } - while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { - reset_status = hclge_get_reset_status(hdev, queue_gid); - if (reset_status) - break; + return_status = req->fun_reset_rcb_return_status; + if (return_status == HCLGE_RESET_RCB_SUCCESS) + return 0; - /* Wait for tqp hw reset */ - usleep_range(1000, 1200); + if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { + dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", + return_status); + return -EIO; } - if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { - dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); - return; + /* if reset rcb cmd is unsupported, we need to send reset tqp cmd + * again to reset all tqps + */ + return hclge_reset_tqp_cmd(handle); +} + +int hclge_reset_tqp(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + /* only need to disable PF's tqp */ + if (!vport->vport_id) { + ret = hclge_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to disable tqp, ret = %d\n", ret); + return ret; + } } - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); - if (ret) - dev_warn(&hdev->pdev->dev, - "Deassert the soft reset fail, ret = %d\n", ret); + return hclge_reset_rcb(handle); } static u32 hclge_get_fw_version(struct hnae3_handle *handle) @@ -10012,9 +10620,10 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; + u8 media_type = hdev->hw.mac.media_type; - *auto_neg = phydev ? hclge_get_autoneg(handle) : 0; + *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? 
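/* Illustrative sketch, not part of the patch: how the firmware return status
 * checked in hclge_reset_rcb() above selects between the new whole-function
 * RCB reset and the older per-queue TQP reset. The helper name is
 * hypothetical and the RCB status values are repeated here only for
 * illustration.
 */
static int example_handle_rcb_status(struct hnae3_handle *handle, u8 status)
{
	if (status == 1U)	/* HCLGE_RESET_RCB_SUCCESS */
		return 0;	/* firmware has reset every queue for us */

	if (status == 0U)	/* HCLGE_RESET_RCB_NOT_SUPPORT */
		return hclge_reset_tqp_cmd(handle);	/* legacy per-queue path */

	return -EIO;		/* any other status is an error */
}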
+ hclge_get_autoneg(handle) : 0; if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { *rx_en = 0; @@ -10060,7 +10669,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, struct phy_device *phydev = hdev->hw.mac.phydev; u32 fc_autoneg; - if (phydev) { + if (phydev || hnae3_dev_phy_imp_supported(hdev)) { fc_autoneg = hclge_get_autoneg(handle); if (auto_neg != fc_autoneg) { dev_info(&hdev->pdev->dev, @@ -10079,7 +10688,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, hclge_record_user_pauseparam(hdev, rx_en, tx_en); - if (!auto_neg) + if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); if (phydev) @@ -10181,7 +10790,6 @@ static void hclge_info_show(struct hclge_dev *hdev) dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); - dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport); dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); @@ -10296,39 +10904,35 @@ static int hclge_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; - struct hclge_vport *vport; - int i, ret; - - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - vport = &hdev->vport[i]; + struct hclge_vport *vport = &hdev->vport[0]; + int ret; - switch (client->type) { - case HNAE3_CLIENT_KNIC: - hdev->nic_client = client; - vport->nic.client = client; - ret = hclge_init_nic_client_instance(ae_dev, vport); - if (ret) - goto clear_nic; + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hdev->nic_client = client; + vport->nic.client = client; + ret = hclge_init_nic_client_instance(ae_dev, vport); + if (ret) + goto clear_nic; - ret = hclge_init_roce_client_instance(ae_dev, vport); - if (ret) - goto clear_roce; + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; - break; - case HNAE3_CLIENT_ROCE: - if (hnae3_dev_roce_supported(hdev)) { - hdev->roce_client = client; - vport->roce.client = client; - } + break; + case HNAE3_CLIENT_ROCE: + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + vport->roce.client = client; + } - ret = hclge_init_roce_client_instance(ae_dev, vport); - if (ret) - goto clear_roce; + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; - break; - default: - return -EINVAL; - } + break; + default: + return -EINVAL; } return 0; @@ -10347,32 +10951,27 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; - struct hclge_vport *vport; - int i; + struct hclge_vport *vport = &hdev->vport[0]; - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - vport = &hdev->vport[i]; - if (hdev->roce_client) { - clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); - while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - msleep(HCLGE_WAIT_RESET_DONE); - - hdev->roce_client->ops->uninit_instance(&vport->roce, - 0); - hdev->roce_client = NULL; - vport->roce.client = NULL; - } - if (client->type == HNAE3_CLIENT_ROCE) - return; - if (hdev->nic_client && client->ops->uninit_instance) { - clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); - while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - 
msleep(HCLGE_WAIT_RESET_DONE); - - client->ops->uninit_instance(&vport->nic, 0); - hdev->nic_client = NULL; - vport->nic.client = NULL; - } + if (hdev->roce_client) { + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } + if (client->type == HNAE3_CLIENT_ROCE) + return; + if (hdev->nic_client && client->ops->uninit_instance) { + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; } } @@ -10491,10 +11090,11 @@ static void hclge_state_uninit(struct hclge_dev *hdev) cancel_delayed_work_sync(&hdev->service_task); } -static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) +static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) { -#define HCLGE_FLR_RETRY_WAIT_MS 500 -#define HCLGE_FLR_RETRY_CNT 5 +#define HCLGE_RESET_RETRY_WAIT_MS 500 +#define HCLGE_RESET_RETRY_CNT 5 struct hclge_dev *hdev = ae_dev->priv; int retry_cnt = 0; @@ -10503,30 +11103,32 @@ static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) retry: down(&hdev->reset_sem); set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = HNAE3_FLR_RESET; + hdev->reset_type = rst_type; ret = hclge_reset_prepare(hdev); if (ret || hdev->reset_pending) { - dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", + dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", ret); if (hdev->reset_pending || - retry_cnt++ < HCLGE_FLR_RETRY_CNT) { + retry_cnt++ < HCLGE_RESET_RETRY_CNT) { dev_err(&hdev->pdev->dev, "reset_pending:0x%lx, retry_cnt:%d\n", hdev->reset_pending, retry_cnt); clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); up(&hdev->reset_sem); - msleep(HCLGE_FLR_RETRY_WAIT_MS); + msleep(HCLGE_RESET_RETRY_WAIT_MS); goto retry; } } - /* disable misc vector before FLR done */ + /* disable misc vector before reset done */ hclge_enable_vector(&hdev->misc_vector, false); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - hdev->rst_stats.flr_rst_cnt++; + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; } -static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) +static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; int ret; @@ -10637,7 +11239,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto err_msi_irq_uninit; - if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER && + !hnae3_dev_phy_imp_supported(hdev)) { ret = hclge_mac_mdio_config(hdev); if (ret) goto err_msi_irq_uninit; @@ -11030,6 +11633,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_tp_port_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", + ret); + return ret; + } + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); @@ -11120,6 +11730,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); hclge_uninit_mac_table(hdev); + hclge_del_all_fd_entries(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -11379,7 
+11990,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) #define REG_SEPARATOR_LINE 1 #define REG_NUM_REMAIN_MASK 3 -#define BD_LIST_MAX_NUM 30 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) { @@ -11473,15 +12083,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) { u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); int data_len_per_desc, bd_num, i; - int bd_num_list[BD_LIST_MAX_NUM]; + int *bd_num_list; u32 data_len; int ret; + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); if (ret) { dev_err(&hdev->pdev->dev, "Get dfx reg bd num fail, status is %d.\n", ret); - return ret; + goto out; } data_len_per_desc = sizeof_field(struct hclge_desc, data); @@ -11492,6 +12106,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; } +out: + kfree(bd_num_list); return ret; } @@ -11499,16 +12115,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) { u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); int bd_num, bd_num_max, buf_len, i; - int bd_num_list[BD_LIST_MAX_NUM]; struct hclge_desc *desc_src; + int *bd_num_list; u32 *reg = data; int ret; + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); if (ret) { dev_err(&hdev->pdev->dev, "Get dfx reg bd num fail, status is %d.\n", ret); - return ret; + goto out; } bd_num_max = bd_num_list[0]; @@ -11517,8 +12137,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) buf_len = sizeof(*desc_src) * bd_num_max; desc_src = kzalloc(buf_len, GFP_KERNEL); - if (!desc_src) - return -ENOMEM; + if (!desc_src) { + ret = -ENOMEM; + goto out; + } for (i = 0; i < dfx_reg_type_num; i++) { bd_num = bd_num_list[i]; @@ -11534,6 +12156,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) } kfree(desc_src); +out: + kfree(bd_num_list); return ret; } @@ -11877,8 +12501,8 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, - .flr_prepare = hclge_flr_prepare, - .flr_done = hclge_flr_done, + .reset_prepare = hclge_reset_prepare_general, + .reset_done = hclge_reset_done, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, .map_ring_to_vector = hclge_map_ring_to_vector, @@ -11943,7 +12567,6 @@ static const struct hnae3_ae_ops hclge_ops = { .get_link_mode = hclge_get_link_mode, .add_fd_entry = hclge_add_fd_entry, .del_fd_entry = hclge_del_fd_entry, - .del_all_fd_entries = hclge_del_all_fd_entries, .get_fd_rule_cnt = hclge_get_fd_rule_cnt, .get_fd_rule_info = hclge_get_fd_rule_info, .get_fd_all_rules = hclge_get_all_rules, @@ -11971,6 +12594,8 @@ static const struct hnae3_ae_ops hclge_ops = { .add_cls_flower = hclge_add_cls_flower, .del_cls_flower = hclge_del_cls_flower, .cls_flower_active = hclge_is_cls_flower_active, + .get_phy_link_ksettings = hclge_get_phy_link_ksettings, + .set_phy_link_ksettings = hclge_set_phy_link_ksettings, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h 
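/* Illustrative sketch, not part of the patch: the removed BD_LIST_MAX_NUM
 * stack array is replaced above by a heap allocation sized at runtime from
 * dfx_reg_type_num; the resulting alloc/use/free shape, with a hypothetical
 * helper name, is simply:
 */
static int example_get_dfx_bd_nums(struct hclge_dev *hdev, u32 type_num)
{
	int *bd_num_list;
	int ret;

	bd_num_list = kcalloc(type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, type_num);

	kfree(bd_num_list);
	return ret;
}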
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 19d7f28773f3..ff1d47308c2d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -223,6 +223,9 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_LINK_UPDATING, HCLGE_STATE_PROMISC_CHANGED, HCLGE_STATE_RST_FAIL, + HCLGE_STATE_FD_TBL_CHANGED, + HCLGE_STATE_FD_CLEAR_ALL, + HCLGE_STATE_FD_USER_DEF_CHANGED, HCLGE_STATE_MAX }; @@ -345,7 +348,6 @@ struct hclge_tc_info { }; struct hclge_cfg { - u8 vmdq_vport_num; u8 tc_num; u16 tqp_desc_num; u16 rx_buf_len; @@ -536,6 +538,9 @@ enum HCLGE_FD_TUPLE { MAX_TUPLE, }; +#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \ + (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV)) + enum HCLGE_FD_META_DATA { PACKET_TYPE_ID, IP_FRAGEMENT, @@ -548,9 +553,21 @@ enum HCLGE_FD_META_DATA { MAX_META_DATA, }; +enum HCLGE_FD_KEY_OPT { + KEY_OPT_U8, + KEY_OPT_LE16, + KEY_OPT_LE32, + KEY_OPT_MAC, + KEY_OPT_IP, + KEY_OPT_VNI, +}; + struct key_info { u8 key_type; u8 key_length; /* use bit as unit */ + enum HCLGE_FD_KEY_OPT key_opt; + int offset; + int moffset; }; #define MAX_KEY_LENGTH 400 @@ -558,6 +575,11 @@ struct key_info { #define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) #define MAX_META_DATA_LENGTH 32 +#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000 +#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0) + /* assigned by firmware, the real filter number for each pf may be less */ #define MAX_FD_FILTER_NUM 4096 #define HCLGE_ARFS_EXPIRE_INTERVAL 5UL @@ -580,6 +602,33 @@ enum HCLGE_FD_ACTION { HCLGE_FD_ACTION_SELECT_TC, }; +enum HCLGE_FD_NODE_STATE { + HCLGE_FD_TO_ADD, + HCLGE_FD_TO_DEL, + HCLGE_FD_ACTIVE, + HCLGE_FD_DELETED, +}; + +enum HCLGE_FD_USER_DEF_LAYER { + HCLGE_FD_USER_DEF_NONE, + HCLGE_FD_USER_DEF_L2, + HCLGE_FD_USER_DEF_L3, + HCLGE_FD_USER_DEF_L4, +}; + +#define HCLGE_FD_USER_DEF_LAYER_NUM 3 +struct hclge_fd_user_def_cfg { + u16 ref_cnt; + u16 offset; +}; + +struct hclge_fd_user_def_info { + enum HCLGE_FD_USER_DEF_LAYER layer; + u16 data; + u16 data_mask; + u16 offset; +}; + struct hclge_fd_key_cfg { u8 key_sel; u8 inner_sipv6_word_en; @@ -596,6 +645,7 @@ struct hclge_fd_cfg { u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; + struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM]; }; #define IPV4_INDEX 3 @@ -612,6 +662,9 @@ struct hclge_fd_rule_tuples { u16 dst_port; u16 vlan_tag1; u16 ether_proto; + u16 l2_user_def; + u16 l3_user_def; + u32 l4_user_def; u8 ip_tos; u8 ip_proto; }; @@ -630,11 +683,15 @@ struct hclge_fd_rule { struct { u16 flow_id; /* only used for arfs */ } arfs; + struct { + struct hclge_fd_user_def_info user_def; + } ep; }; u16 queue_id; u16 vf_id; u16 location; enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; + enum HCLGE_FD_NODE_STATE state; u8 action; }; @@ -753,7 +810,6 @@ struct hclge_dev { struct hclge_rst_stats rst_stats; struct semaphore reset_sem; /* protect reset process */ u32 fw_version; - u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ u16 num_req_vfs; /* Num VFs requested for this PF */ @@ -997,8 +1053,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); -int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); -void 
hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); +int hclge_reset_tqp(struct hnae3_handle *handle); int hclge_cfg_flowctrl(struct hclge_dev *hdev); int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); int hclge_vport_start(struct hclge_vport *vport); @@ -1034,4 +1089,5 @@ void hclge_report_hw_error(struct hclge_dev *hdev, enum hnae3_hw_error_type type); void hclge_inform_vf_promisc_info(struct hclge_vport *vport); void hclge_dbg_dump_rst_info(struct hclge_dev *hdev); +int hclge_push_vf_link_status(struct hclge_vport *vport); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 51a36e74f088..5512ffe0a149 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -490,16 +490,14 @@ static void hclge_get_vf_media_type(struct hclge_vport *vport, resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH; } -static int hclge_get_link_info(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +int hclge_push_vf_link_status(struct hclge_vport *vport) { #define HCLGE_VF_LINK_STATE_UP 1U #define HCLGE_VF_LINK_STATE_DOWN 0U struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[8]; - u8 dest_vfid; + u8 msg_data[9]; u16 duplex; /* mac.link can only be 0 or 1 */ @@ -520,11 +518,11 @@ static int hclge_get_link_info(struct hclge_vport *vport, memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); - dest_vfid = mbx_req->mbx_src_vfid; + msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN; /* send this requested info to VF */ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), - HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); + HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); } static void hclge_get_link_mode(struct hclge_vport *vport, @@ -550,14 +548,32 @@ static void hclge_get_link_mode(struct hclge_vport *vport, HCLGE_MBX_LINK_STAT_MODE, dest_vfid); } -static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { +#define HCLGE_RESET_ALL_QUEUE_DONE 1U + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; u16 queue_id; + int ret; memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; + resp_msg->len = sizeof(u8); + + /* pf will reset vf's all queues at a time. So it is unnecessary + * to reset queues if queue_id > 0, just return success. 
+ */ + if (queue_id > 0) + return 0; - hclge_reset_vf_queue(vport, queue_id); + ret = hclge_reset_tqp(handle); + if (ret) + dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret); + + return ret; } static int hclge_reset_vf(struct hclge_vport *vport) @@ -776,14 +792,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev) hclge_get_vf_tcinfo(vport, &resp_msg); break; case HCLGE_MBX_GET_LINK_STATUS: - ret = hclge_get_link_info(vport, req); + ret = hclge_push_vf_link_status(vport); if (ret) dev_err(&hdev->pdev->dev, "failed to inform link stat to VF, ret = %d\n", ret); break; case HCLGE_MBX_QUEUE_RESET: - hclge_mbx_reset_vf_queue(vport, req); + ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg); break; case HCLGE_MBX_RESET: ret = hclge_reset_vf(vport); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index e89820702540..08e88d9422cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -268,3 +268,42 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev) phy_stop(phydev); } + +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to read phy reg, ret = %d.\n", ret); + + return le16_to_cpu(req->reg_val); +} + +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->reg_val = cpu_to_le16(val); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to write phy reg, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index dd9a1218a7b0..fd0e20190b90 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -9,5 +9,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 151afd1f0688..ebb962bad451 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -631,13 +631,12 @@ static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) return sum; } -static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; u16 vport_max_rss_size; u16 max_rss_size; - u8 i; /* TC configuration is shared by PF/VF in one port, only 
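/* Illustrative sketch, not part of the patch: a hypothetical caller of the
 * new firmware-backed PHY register helpers added above. MII_BMCR and
 * BMCR_LOOPBACK come from <linux/mii.h>; note that hclge_read_phy_reg()
 * logs an error on failure but has no separate error return.
 */
static int example_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	u16 val = hclge_read_phy_reg(hdev, MII_BMCR);

	if (en)
		val |= BMCR_LOOPBACK;
	else
		val &= ~BMCR_LOOPBACK;

	return hclge_write_phy_reg(hdev, MII_BMCR, val);
}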
allow * one tc for VF for simplicity. VF's vport_id is non zero. @@ -665,19 +664,18 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->rss_size = kinfo->req_rss_size; } else if (kinfo->rss_size > max_rss_size || (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { - /* if user not set rss, the rss_size should compare with the - * valid msi numbers to ensure one to one map between tqp and - * irq as default. - */ - if (!kinfo->req_rss_size) - max_rss_size = min_t(u16, max_rss_size, - (hdev->num_nic_msi - 1) / - kinfo->tc_info.num_tc); - /* Set to the maximum specification value (max_rss_size). */ kinfo->rss_size = max_rss_size; } +} + +static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u8 i; + hclge_tm_update_kinfo_rss_size(vport); kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->alloc_rss_size = kinfo->rss_size; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 46700c427849..d8c5c5810b99 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -349,7 +349,6 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev, u32 caps; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B)) set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 8a37a22a176b..c6dc11b32aa7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -223,11 +223,14 @@ struct hclgevf_rss_indirection_table_cmd { }; #define HCLGEVF_RSS_TC_OFFSET_S 0 -#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S) +#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0) +#define HCLGEVF_RSS_TC_SIZE_MSB_B 11 #define HCLGEVF_RSS_TC_SIZE_S 12 -#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S) +#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12) #define HCLGEVF_RSS_TC_VALID_B 15 #define HCLGEVF_MAX_TC_NUM 8 +#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3 + struct hclgevf_rss_tc_mode_cmd { __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM]; u8 rsv[8]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e295d359e912..0db51ef15ef6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -497,7 +497,6 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; - if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); if (rclient && rclient->ops->link_status_change) @@ -707,6 +706,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) (tc_valid[i] & 0x1)); hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B, + tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET & + 0x1); hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); @@ -1241,12 +1243,11 @@ static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) } } -static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, - int stream_id, bool enable) +static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) { struct hclgevf_cfg_com_tqp_queue_cmd *req; struct hclgevf_desc desc; - int status; req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; @@ -1257,12 +1258,22 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, if (enable) req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "TQP enable fail, status =%d.\n", status); + return hclgevf_cmd_send(&hdev->hw, &desc, 1); +} - return status; +static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } + + return 0; } static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) @@ -1711,20 +1722,39 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } -static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +static int hclgevf_reset_tqp(struct hnae3_handle *handle) { +#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclge_vf_to_pf_msg send_msg; + u8 return_status = 0; int ret; + u16 i; /* disable vf queue before send queue reset msg to PF */ - ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); - if (ret) + ret = hclgevf_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", + ret); return ret; + } hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); - memcpy(send_msg.data, &queue_id, sizeof(queue_id)); - return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, + sizeof(return_status)); + if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) + return ret; + + for (i = 1; i < handle->kinfo.num_tqps; i++) { + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); + memcpy(send_msg.data, &i, sizeof(i)); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) + return ret; + } + + return 0; } static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) @@ -2084,10 +2114,11 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) writel(en ? 
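/* Illustrative sketch, not part of the patch: HCLGEVF_RSS_TC_SIZE_M only
 * covers bits 14:12, so a tc_size value larger than 7 needs its extra
 * most-significant bit stored separately in bit 11; this mirrors the
 * hclgevf_set_rss_tc_mode() change above (helper name hypothetical).
 */
static u16 example_encode_tc_mode(u16 tc_size, u16 tc_offset)
{
	u16 mode = 0;

	hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, 1);
	hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, HCLGEVF_RSS_TC_SIZE_S,
			tc_size);
	hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
		      tc_size >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET & 0x1);
	hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, HCLGEVF_RSS_TC_OFFSET_S,
			tc_offset);

	return mode;
}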
1 : 0, vector->addr); } -static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) +static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) { -#define HCLGEVF_FLR_RETRY_WAIT_MS 500 -#define HCLGEVF_FLR_RETRY_CNT 5 +#define HCLGEVF_RESET_RETRY_WAIT_MS 500 +#define HCLGEVF_RESET_RETRY_CNT 5 struct hclgevf_dev *hdev = ae_dev->priv; int retry_cnt = 0; @@ -2096,29 +2127,31 @@ static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) retry: down(&hdev->reset_sem); set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = HNAE3_FLR_RESET; + hdev->reset_type = rst_type; ret = hclgevf_reset_prepare(hdev); if (ret) { - dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", + dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", ret); if (hdev->reset_pending || - retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { + retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { dev_err(&hdev->pdev->dev, "reset_pending:0x%lx, retry_cnt:%d\n", hdev->reset_pending, retry_cnt); clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); up(&hdev->reset_sem); - msleep(HCLGEVF_FLR_RETRY_WAIT_MS); + msleep(HCLGEVF_RESET_RETRY_WAIT_MS); goto retry; } } - /* disable misc vector before FLR done */ + /* disable misc vector before reset done */ hclgevf_enable_vector(&hdev->misc_vector, false); - hdev->rst_stats.flr_rst_cnt++; + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; } -static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) +static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) { struct hclgevf_dev *hdev = ae_dev->priv; int ret; @@ -2307,10 +2340,11 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) hclgevf_tqps_update_stats(handle); - /* request the link status from the PF. PF would be able to tell VF - * about such updates in future so we might remove this later + /* VF does not need to request link status when this bit is set, because + * PF will push its link status to VFs when link status changed. 
*/ - hclgevf_request_link_info(hdev); + if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) + hclgevf_request_link_info(hdev); hclgevf_update_link_mode(hdev); @@ -2356,7 +2390,6 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, /* fetch the events from their corresponding regs */ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG); - if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); dev_info(&hdev->pdev->dev, @@ -2625,6 +2658,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); hclgevf_reset_tqp_stats(handle); @@ -2638,14 +2672,11 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) static void hclgevf_ae_stop(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i; set_bit(HCLGEVF_STATE_DOWN, &hdev->state); if (hdev->reset_type != HNAE3_VF_RESET) - for (i = 0; i < handle->kinfo.num_tqps; i++) - if (hclgevf_reset_tqp(handle, i)) - break; + hclgevf_reset_tqp(handle); hclgevf_reset_tqp_stats(handle); hclgevf_update_link_status(hdev, 0); @@ -3615,7 +3646,7 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle, } #define MAX_SEPARATE_NUM 4 -#define SEPARATOR_VALUE 0xFFFFFFFF +#define SEPARATOR_VALUE 0xFDFCFBFA #define REG_NUM_PER_LINE 4 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) @@ -3722,8 +3753,8 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .flr_prepare = hclgevf_flr_prepare, - .flr_done = hclgevf_flr_done, + .reset_prepare = hclgevf_reset_prepare_general, + .reset_done = hclgevf_reset_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 8c27ecd819af..265c9b0b4728 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -152,6 +152,7 @@ enum hclgevf_states { HCLGEVF_STATE_LINK_UPDATING, HCLGEVF_STATE_PROMISC_CHANGED, HCLGEVF_STATE_RST_FAIL, + HCLGEVF_STATE_PF_PUSH_LINK_STATUS, }; struct hclgevf_mac { @@ -176,9 +177,9 @@ struct hclgevf_hw { /* TQP stats */ struct hlcgevf_tqp_stats { - /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ - /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ }; @@ -192,7 +193,6 @@ struct hclgevf_tqp { }; struct hclgevf_cfg { - u8 vmdq_vport_num; u8 tc_num; u16 tqp_desc_num; u16 rx_buf_len; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 5b2dcd97c107..9b17735b9f4c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -276,6 +276,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 duplex; u32 speed; u32 tail; + u8 flag; u8 idx; /* we can safely clear it now 
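/* Illustrative sketch, not part of the patch: byte layout of the 9-byte
 * HCLGE_MBX_LINK_STAT_CHANGE payload that hclge_push_vf_link_status() builds
 * on the PF side and hclgevf_mbx_async_handler() unpacks below. The struct
 * name is hypothetical; fields are copied in host byte order, exactly as the
 * memcpy()s in the patch do.
 */
struct example_vf_link_stat_msg {
	u16 link_status;	/* msg_data[0..1] */
	u32 speed;		/* msg_data[2..5] */
	u16 duplex;		/* msg_data[6..7] */
	u8  flag;		/* msg_data[8]: HCLGE_MBX_PUSH_LINK_STATUS_EN */
} __packed;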
as we are at start of the async message @@ -300,11 +301,16 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = msg_q[1]; memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)msg_q[4]; + flag = (u8)msg_q[5]; /* update upper layer with new link link status */ hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); + if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) + set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, + &hdev->state); + break; case HCLGE_MBX_LINK_STAT_MODE: idx = (u8)msg_q[1]; diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 883d0d7c6858..3e54017a2a5b 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -279,7 +279,7 @@ static int hns_mdio_write(struct mii_bus *bus, static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { int ret; - u16 reg_val = 0; + u16 reg_val; u8 devad = ((regnum >> 16) & 0x1f); u8 is_c45 = !!(regnum & MII_ADDR_C45); u16 reg = (u16)(regnum & 0xffff); @@ -420,7 +420,7 @@ static int hns_mdio_probe(struct platform_device *pdev) { struct hns_mdio_device *mdio_dev; struct mii_bus *new_bus; - int ret = -ENODEV; + int ret; if (!pdev) { dev_err(NULL, "pdev is NULL!\r\n"); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index c340d9acba80..dc024ef521c0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -34,7 +34,7 @@ #include "hinic_rx.h" #include "hinic_dev.h" -#define SET_LINK_STR_MAX_LEN 128 +#define SET_LINK_STR_MAX_LEN 16 #define GET_SUPPORTED_MODE 0 #define GET_ADVERTISED_MODE 1 @@ -462,24 +462,19 @@ static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev, { struct hinic_link_ksettings_info settings = {0}; char set_link_str[SET_LINK_STR_MAX_LEN] = {0}; + const char *autoneg_str; struct net_device *netdev = nic_dev->netdev; enum nic_speed_level speed_level = 0; int err; - err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s", - (set_settings & HILINK_LINK_SET_AUTONEG) ? - (autoneg ? "autong enable " : "autong disable ") : ""); - if (err < 0 || err >= SET_LINK_STR_MAX_LEN) { - netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n", - err, SET_LINK_STR_MAX_LEN); - return -EFAULT; - } + autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ? + (autoneg ? 
"autong enable " : "autong disable ") : ""; if (set_settings & HILINK_LINK_SET_SPEED) { speed_level = hinic_ethtool_to_hw_speed_level(speed); err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, - "%sspeed %d ", set_link_str, speed); - if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) { + "speed %d ", speed); + if (err >= SET_LINK_STR_MAX_LEN) { netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", err, SET_LINK_STR_MAX_LEN); return -EFAULT; @@ -494,11 +489,11 @@ static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev, err = hinic_set_link_settings(nic_dev->hwdev, &settings); if (err != HINIC_MGMT_CMD_UNSUPPORTED) { if (err) - netif_err(nic_dev, drv, netdev, "Set %s failed\n", - set_link_str); + netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n", + autoneg_str, set_link_str); else - netif_info(nic_dev, drv, netdev, "Set %s successfully\n", - set_link_str); + netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n", + autoneg_str, set_link_str); return err; } @@ -543,8 +538,8 @@ static void hinic_get_drvinfo(struct net_device *netdev, struct hinic_hwif *hwif = hwdev->hwif; int err; - strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); + strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); err = hinic_get_mgmt_version(nic_dev, mgmt_ver); if (err) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 4e4029d5c8e1..06586173add7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -629,10 +629,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, &cmd_paddr, GFP_KERNEL); - if (!cmd_vaddr) { - dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); + if (!cmd_vaddr) return -ENOMEM; - } cell_ctxt = &chain->cell_ctxt[cell_idx]; @@ -679,10 +677,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, GFP_KERNEL); - if (!node) { - dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); + if (!node) return -ENOMEM; - } node->read.hw_wb_resp_paddr = 0; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index efbaed389440..cab38ff0713c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -334,7 +334,7 @@ static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx, } /** - * dma_attr_table_init - initialize the the default dma attributes + * dma_attr_table_init - initialize the default dma attributes * @hwif: the HW interface of a pci function device **/ static void dma_attr_init(struct hinic_hwif *hwif) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 819fa13034c0..817173f1fbb7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -440,18 +440,14 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_recv_msg *recv_msg) { struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; - struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); - if 
(!mgmt_work) { - dev_err(&pdev->dev, "Allocate mgmt work memory failed\n"); + if (!mgmt_work) return; - } if (recv_msg->msg_len) { mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); if (!mgmt_work->msg) { - dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n"); kfree(mgmt_work); return; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index fcf7bfe4aa47..dcba4d009bad 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -414,7 +414,6 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, &rq->pi_dma_addr, GFP_KERNEL); if (!rq->pi_virt_addr) { - dev_err(&pdev->dev, "Failed to allocate PI address\n"); err = -ENOMEM; goto err_pi_virt; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 070a7cc6392e..cce08647b9b2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -137,10 +137,8 @@ static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, int err; skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); - if (!skb) { - netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n"); + if (!skb) return NULL; - } addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz, DMA_FROM_DEVICE); @@ -212,10 +210,8 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq) for (i = 0; i < free_wqebbs; i++) { skb = rx_alloc_skb(rxq, &dma_addr); - if (!skb) { - netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); + if (!skb) goto skb_out; - } hinic_set_sge(&sge, dma_addr, skb->len); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 8da7d46363b2..710c4ff7bc0e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -377,6 +377,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info, } else if (ip.v4->version == 6) { unsigned char *exthdr; __be16 frag_off; + l3_type = IPV6_PKT; tunnel_type = TUNNEL_UDP_CSUM; exthdr = ip.hdr + sizeof(*ip.v6); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index c2e740475786..ea55314b209d 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -109,6 +109,7 @@ static const struct of_device_id ehea_device_table[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, ehea_device_table); static struct platform_driver ehea_driver = { .driver = { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ffb2a91750c7..5788bb956d73 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -827,6 +827,30 @@ static void release_napi(struct ibmvnic_adapter *adapter) adapter->napi_enabled = false; } +static const char *adapter_state_to_string(enum vnic_state state) +{ + switch (state) { + case VNIC_PROBING: + return "PROBING"; + case VNIC_PROBED: + return "PROBED"; + case VNIC_OPENING: + return "OPENING"; + case VNIC_OPEN: + return "OPEN"; + case VNIC_CLOSING: + return "CLOSING"; + case VNIC_CLOSED: + return "CLOSED"; + case VNIC_REMOVING: + return "REMOVING"; + case VNIC_REMOVED: + return "REMOVED"; + default: + return "UNKNOWN"; + } +} + static int ibmvnic_login(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@ -905,7 +929,7 @@ static int 
ibmvnic_login(struct net_device *netdev) __ibmvnic_set_mac(netdev, adapter->mac_addr); - netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state); + netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); return 0; } @@ -1179,8 +1203,9 @@ static int ibmvnic_open(struct net_device *netdev) * honor our setting below. */ if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { - netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n", - adapter->state, adapter->failover_pending); + netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", + adapter_state_to_string(adapter->state), + adapter->failover_pending); adapter->state = VNIC_OPEN; rc = 0; goto out; @@ -1344,8 +1369,9 @@ static int ibmvnic_close(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; - netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n", - adapter->state, adapter->failover_pending, + netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", + adapter_state_to_string(adapter->state), + adapter->failover_pending, adapter->force_reset_recovery); /* If device failover is pending, just set device state and return. @@ -1672,9 +1698,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - memcpy(dst + cur, - page_address(skb_frag_page(frag)) + - skb_frag_off(frag), skb_frag_size(frag)); + memcpy(dst + cur, skb_frag_address(frag), + skb_frag_size(frag)); cur += skb_frag_size(frag); } } else { @@ -1906,6 +1931,26 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) return rc; } +static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) +{ + switch (reason) { + case VNIC_RESET_FAILOVER: + return "FAILOVER"; + case VNIC_RESET_MOBILITY: + return "MOBILITY"; + case VNIC_RESET_FATAL: + return "FATAL"; + case VNIC_RESET_NON_FATAL: + return "NON_FATAL"; + case VNIC_RESET_TIMEOUT: + return "TIMEOUT"; + case VNIC_RESET_CHANGE_PARAM: + return "CHANGE_PARAM"; + default: + return "UNKNOWN"; + } +} + /* * do_reset returns zero if we are able to keep processing reset events, or * non-zero if we hit a fatal error and must halt. @@ -1919,9 +1964,11 @@ static int do_reset(struct ibmvnic_adapter *adapter, int rc; netdev_dbg(adapter->netdev, - "[S:%d FOP:%d] Reset reason %d, reset_state %d\n", - adapter->state, adapter->failover_pending, - rwi->reset_reason, reset_state); + "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", + adapter_state_to_string(adapter->state), + adapter->failover_pending, + reset_reason_to_string(rwi->reset_reason), + adapter_state_to_string(reset_state)); adapter->reset_reason = rwi->reset_reason; /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ @@ -1981,8 +2028,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, * from VNIC_CLOSING state. 
*/ netdev_dbg(netdev, - "Open changed state from %d, updating.\n", - reset_state); + "Open changed state from %s, updating.\n", + adapter_state_to_string(reset_state)); reset_state = VNIC_OPEN; adapter->state = VNIC_CLOSING; } @@ -2119,8 +2166,9 @@ out: if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) rtnl_unlock(); - netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n", - adapter->state, adapter->failover_pending, rc); + netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", + adapter_state_to_string(adapter->state), + adapter->failover_pending, rc); return rc; } @@ -2130,8 +2178,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, struct net_device *netdev = adapter->netdev; int rc; - netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", - rwi->reset_reason); + netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", + reset_reason_to_string(rwi->reset_reason)); /* read the state and check (again) after getting rtnl */ reset_state = adapter->state; @@ -2197,8 +2245,9 @@ out: /* restore adapter state if reset failed */ if (rc) adapter->state = reset_state; - netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n", - adapter->state, adapter->failover_pending, rc); + netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", + adapter_state_to_string(adapter->state), + adapter->failover_pending, rc); return rc; } @@ -2233,8 +2282,9 @@ static void __ibmvnic_reset(struct work_struct *work) adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); if (test_and_set_bit_lock(0, &adapter->resetting)) { - schedule_delayed_work(&adapter->ibmvnic_delayed_reset, - IBMVNIC_RESET_DELAY); + queue_delayed_work(system_long_wq, + &adapter->ibmvnic_delayed_reset, + IBMVNIC_RESET_DELAY); return; } @@ -2277,8 +2327,8 @@ static void __ibmvnic_reset(struct work_struct *work) if (rc) { /* give backing device time to settle down */ netdev_dbg(adapter->netdev, - "[S:%d] Hard reset failed, waiting 60 secs\n", - adapter->state); + "[S:%s] Hard reset failed, waiting 60 secs\n", + adapter_state_to_string(adapter->state)); set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(60 * HZ); } @@ -2306,8 +2356,9 @@ static void __ibmvnic_reset(struct work_struct *work) clear_bit_unlock(0, &adapter->resetting); netdev_dbg(adapter->netdev, - "[S:%d FRR:%d WFR:%d] Done processing resets\n", - adapter->state, adapter->force_reset_recovery, + "[S:%s FRR:%d WFR:%d] Done processing resets\n", + adapter_state_to_string(adapter->state), + adapter->force_reset_recovery, adapter->wait_for_reset); } @@ -2354,8 +2405,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { - netdev_dbg(netdev, "Skipping matching reset, reason=%d\n", - reason); + netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", + reset_reason_to_string(reason)); ret = EBUSY; goto err; } @@ -2375,8 +2426,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); - netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); - schedule_work(&adapter->ibmvnic_reset); + netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", + reset_reason_to_string(reason)); + queue_work(system_long_wq, &adapter->ibmvnic_reset); ret = 0; err: @@ -5445,7 +5497,7 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, if (rc) { 
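/* Note on the ibmvnic reset scheduling change above: reset work is now
 * queued on system_long_wq rather than the default system workqueue because
 * a reset can block for a long time (it may sleep for up to 60 seconds while
 * the backing device settles), and such long-running items should not tie up
 * system_wq.
 */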
netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", rc); - return -EINVAL; + goto last_resort; } session_token = (__be64)retbuf[0]; @@ -5453,15 +5505,17 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, be64_to_cpu(session_token)); rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_SESSION_ERR_DETECTED, session_token, 0, 0); - if (rc) { - netdev_err(netdev, "Client initiated failover failed, rc %ld\n", + if (rc) + netdev_err(netdev, + "H_VIOCTL initiated failover failed, rc %ld\n", rc); - return -EINVAL; - } + +last_resort: + netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); + ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); return count; } - static DEVICE_ATTR_WO(failover); static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 806aa75a4e86..c1d39a748546 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -412,77 +412,6 @@ struct ibmvnic_control_ip_offload { struct ibmvnic_rc rc; } __packed __aligned(8); -struct ibmvnic_request_dump_size { - u8 first; - u8 cmd; - u8 reserved[6]; - __be32 len; - struct ibmvnic_rc rc; -} __packed __aligned(8); - -struct ibmvnic_request_dump { - u8 first; - u8 cmd; - u8 reserved1[2]; - __be32 ioba; - __be32 len; - u8 reserved2[4]; -} __packed __aligned(8); - -struct ibmvnic_request_dump_rsp { - u8 first; - u8 cmd; - u8 reserved[6]; - __be32 dumped_len; - struct ibmvnic_rc rc; -} __packed __aligned(8); - -struct ibmvnic_request_ras_comp_num { - u8 first; - u8 cmd; - u8 reserved1[2]; - __be32 num_components; - u8 reserved2[4]; - struct ibmvnic_rc rc; -} __packed __aligned(8); - -struct ibmvnic_request_ras_comps { - u8 first; - u8 cmd; - u8 reserved[2]; - __be32 ioba; - __be32 len; - struct ibmvnic_rc rc; -} __packed __aligned(8); - -struct ibmvnic_control_ras { - u8 first; - u8 cmd; - u8 correlator; - u8 level; - u8 op; -#define IBMVNIC_TRACE_LEVEL 1 -#define IBMVNIC_ERROR_LEVEL 2 -#define IBMVNIC_TRACE_PAUSE 3 -#define IBMVNIC_TRACE_RESUME 4 -#define IBMVNIC_TRACE_ON 5 -#define IBMVNIC_TRACE_OFF 6 -#define IBMVNIC_CHG_TRACE_BUFF_SZ 7 - u8 trace_buff_sz[3]; - u8 reserved[4]; - struct ibmvnic_rc rc; -} __packed __aligned(8); - -struct ibmvnic_collect_fw_trace { - u8 first; - u8 cmd; - u8 correlator; - u8 reserved; - __be32 ioba; - __be32 len; - struct ibmvnic_rc rc; -} __packed __aligned(8); - struct ibmvnic_request_statistics { u8 first; u8 cmd; @@ -494,15 +423,6 @@ struct ibmvnic_request_statistics { u8 reserved[4]; } __packed __aligned(8); -struct ibmvnic_request_debug_stats { - u8 first; - u8 cmd; - u8 reserved[2]; - __be32 ioba; - __be32 len; - struct ibmvnic_rc rc; -} __packed __aligned(8); - struct ibmvnic_error_indication { u8 first; u8 cmd; @@ -677,22 +597,8 @@ union ibmvnic_crq { struct ibmvnic_query_ip_offload query_ip_offload_rsp; struct ibmvnic_control_ip_offload control_ip_offload; struct ibmvnic_control_ip_offload control_ip_offload_rsp; - struct ibmvnic_request_dump_size request_dump_size; - struct ibmvnic_request_dump_size request_dump_size_rsp; - struct ibmvnic_request_dump request_dump; - struct ibmvnic_request_dump_rsp request_dump_rsp; - struct ibmvnic_request_ras_comp_num request_ras_comp_num; - struct ibmvnic_request_ras_comp_num request_ras_comp_num_rsp; - struct ibmvnic_request_ras_comps request_ras_comps; - struct ibmvnic_request_ras_comps request_ras_comps_rsp; - struct ibmvnic_control_ras control_ras; - struct 
ibmvnic_control_ras control_ras_rsp; - struct ibmvnic_collect_fw_trace collect_fw_trace; - struct ibmvnic_collect_fw_trace collect_fw_trace_rsp; struct ibmvnic_request_statistics request_statistics; struct ibmvnic_generic_crq request_statistics_rsp; - struct ibmvnic_request_debug_stats request_debug_stats; - struct ibmvnic_request_debug_stats request_debug_stats_rsp; struct ibmvnic_error_indication error_indication; struct ibmvnic_link_state_indication link_state_indication; struct ibmvnic_change_mac_addr change_mac_addr; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 5aa86318ed3e..c1d155690341 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -294,6 +294,7 @@ config ICE tristate "Intel(R) Ethernet Connection E800 Series Support" default n depends on PCI_MSI + select DIMLIB select NET_DEVLINK select PLDMFW help diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 4c0c9433bd60..19cf36360933 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1183,6 +1183,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) break; case e1000_ms_auto: phy_data &= ~CR_1000T_MS_ENABLE; + break; default: break; } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 0ac8d79a7987..590ad110d383 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2745,7 +2745,7 @@ release: } /** - * e1000_k1_gig_workaround_lv - K1 Si workaround + * e1000_k1_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps @@ -5220,7 +5220,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, } /** - * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 * @hw: pointer to the HW structure * * Workaround for 82566 power-down on D3 entry: diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a0948002ddf8..88e9035b75cf 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -25,6 +25,7 @@ #include <linux/pm_runtime.h> #include <linux/aer.h> #include <linux/prefetch.h> +#include <linux/suspend.h> #include "e1000.h" @@ -5990,7 +5991,7 @@ static void e1000_reset_task(struct work_struct *work) } /** - * e1000_get_stats64 - Get System Network Statistics + * e1000e_get_stats64 - Get System Network Statistics * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer * @@ -6163,7 +6164,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, } /** - * e1000e_hwtstamp_ioctl - control hardware time stamping + * e1000e_hwtstamp_set - control hardware time stamping * @netdev: network interface device structure * @ifr: interface request * @@ -6821,7 +6822,7 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) } /** - * e1000e_disable_aspm_locked Disable ASPM states. + * e1000e_disable_aspm_locked - Disable ASPM states. 
* @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable * @@ -6922,6 +6923,12 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } +static __maybe_unused int e1000e_pm_prepare(struct device *dev) +{ + return pm_runtime_suspended(dev) && + pm_suspend_via_firmware(); +} + static __maybe_unused int e1000e_pm_suspend(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); @@ -7630,9 +7637,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_print_device_info(adapter); - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); - if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) + if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev); return 0; @@ -7855,6 +7862,7 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); static const struct dev_pm_ops e1000_pm_ops = { #ifdef CONFIG_PM_SLEEP + .prepare = e1000e_pm_prepare, .suspend = e1000e_pm_suspend, .resume = e1000e_pm_resume, .freeze = e1000e_pm_freeze, diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index bdd9dc163f15..1db35b2c7750 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -371,7 +371,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) } /** - * e1000e_write_phy_reg_igp - Write igp PHY register + * __e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index f3f671311855..9e79d672f4f1 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -142,7 +142,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device, } /** - * e1000e_phc_getsynctime - Reads the current system/device cross timestamp + * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp * @ptp: ptp clock structure * @xtstamp: structure containing timestamp * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index c45315472245..86397c564dfc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -105,7 +105,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) } /** - * fm10k_dcbnl_ieee_getdcbx - get the DCBX configuration for the device + * fm10k_dcbnl_getdcbx - get the DCBX configuration for the device * @dev: netdev interface for the device * * Returns that we support only IEEE DCB for this interface @@ -116,7 +116,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev) } /** - * fm10k_dcbnl_ieee_setdcbx - get the DCBX configuration for the device + * fm10k_dcbnl_setdcbx - get the DCBX configuration for the device * @dev: netdev interface for the device * @mode: new mode for this device * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 1d27b2fb23af..5c77054d67c6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -185,7 +185,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) } /** - * fm10k_dbg_free_q_vector_dir - setup debugfs for the 
q_vectors + * fm10k_dbg_q_vector_exit - setup debugfs for the q_vectors * @q_vector: q_vector to allocate directories for **/ void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 247f44f4cb30..3362f26d7f99 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1774,7 +1774,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface) } /** - * f10k_reset_msix_capability - reset MSI-X capability + * fm10k_reset_msix_capability - reset MSI-X capability * @interface: board private structure to initialize * * Reset the MSI-X capability back to its starting state @@ -1787,7 +1787,7 @@ static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) } /** - * f10k_init_msix_capability - configure MSI-X capability + * fm10k_init_msix_capability - configure MSI-X capability * @interface: board private structure to initialize * * Attempt to configure the interrupts using the best available diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 8e2e92bf3cd4..30ca9ee1900b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -692,7 +692,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO + * fm10k_mbx_dequeue_rx - Dequeues the message from the head in the Rx FIFO * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @@ -1039,6 +1039,7 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, case FM10K_STATE_CLOSED: /* generate new header based on data */ fm10k_mbx_create_disconnect_hdr(mbx); + break; default: break; } @@ -2017,6 +2018,7 @@ static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_CONNECT: /* Update remote value to match local value */ mbx->remote = mbx->local; + break; default: break; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index c0780c3624c8..af1b0cde3670 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1417,7 +1417,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, } /** - * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF + * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF * @hw: pointer to hardware structure * @stats: pointer to the stats structure to update * diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 15f93b355099..9067cd3ce243 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -66,6 +66,8 @@ #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 +#define I40E_MIN_ARQ_LEN 1 +#define I40E_MIN_ASQ_LEN 2 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ec19e18305ec..41b813fe07a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2332,7 +2332,7 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, } /** - * i40e_get_vsi_params - get VSI configuration info 
+ * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL @@ -2586,7 +2586,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) } /** - * i40e_updatelink_status - update status of the HW network link + * i40e_update_link_info - update status of the HW network link * @hw: pointer to the hw struct **/ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) @@ -5059,7 +5059,7 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) } /** - * i40e_blink_phy_led + * i40e_blink_phy_link_led * @hw: pointer to the HW structure * @time: time how long led will blinks in secs * @interval: gap between LED on and off in msecs diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 243b0d2b7b72..673f341f4c0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -234,7 +234,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, } /** - * i40e_parse_ieee_etsrec_tlv + * i40e_parse_ieee_tlv * @tlv: IEEE 802.1Qaz TLV * @dcbcfg: Local store to update ETS REC data * @@ -1588,7 +1588,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, } /** - * i40e_dcb_hw_rx_ets_bw_config + * i40e_dcb_hw_rx_up2tc_config * @hw: pointer to the hw struct * @prio_tc: priority to tc assignment indexed by priority * diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 0345132a0ef5..e32c61909b31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -392,7 +392,7 @@ static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, } /** - * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx BW config + * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config * @netdev: the corresponding netdev * @pgid: the corresponding traffic class * @bw_pct: the BW percentage for the specified traffic class diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 5e08f100c413..e1069ae658ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -77,7 +77,7 @@ static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new, } /** - * i40e_ddp_does_profiles_ - checks if DDP overlaps with existing one. + * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one. 
* @hw: HW data structure * @pinfo: DDP profile information structure * diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d627b59ad446..291e61ac3e44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -654,7 +654,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) } /** - * i40e_dbg_dump_stats - handles dump stats write into command datum + * i40e_dbg_dump_eth_stats - handles dump stats write into command datum * @pf: the i40e_pf created in command write * @estats: the eth stats structure to be dumped **/ @@ -1641,7 +1641,7 @@ static const struct file_operations i40e_dbg_command_fops = { static char i40e_dbg_netdev_ops_buf[256] = ""; /** - * i40e_dbg_netdev_ops - read for netdev_ops datum + * i40e_dbg_netdev_ops_read - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 0e92668012e3..040a01400b85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -212,7 +212,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], } /** - * 40e_add_stat_strings - copy stat strings into ethtool buffer + * i40e_add_stat_strings - copy stat strings into ethtool buffer * @p: ethtool supplied buffer * @stats: stat definitions array * @@ -2409,21 +2409,15 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - char *p = (char *)data; unsigned int i; + u8 *p = data; - for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) + ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; - for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gl_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) + ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index a3da422ab05b..d6e92ecddfbd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -511,7 +511,7 @@ configure_lan_hmc_out: } /** - * i40e_delete_hmc_object - remove hmc objects + * i40e_delete_lan_hmc_object - remove hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_delete_obj_info struct * diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 527023ee4c07..c2d145a56b5e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6,6 +6,7 @@ #include <linux/pci.h> #include <linux/bpf.h> #include <generated/utsrelease.h> +#include <linux/crash_dump.h> /* Local includes */ #include "i40e.h" @@ -2023,7 +2024,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, } /** - * i40e_next_entry - Get the next non-broadcast filter from 
a list + * i40e_next_filter - Get the next non-broadcast filter from a list * @next: pointer to filter in list * * Returns the next non-broadcast filter in the list. Required so that we @@ -5203,7 +5204,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) } /** - * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes + * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried * * Return a bitmap for enabled traffic classes for this PF. @@ -7338,7 +7339,7 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) qcount = min_t(int, vsi->alloc_queue_pairs, i40e_pf_get_max_q_per_tc(vsi->back)); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - /* For the TC that is not enabled set the offset to to default + /* For the TC that is not enabled set the offset to default * queue and allocate one queue for the given TC. */ vsi->tc_config.tc_info[i].qoffset = 0; @@ -9466,7 +9467,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } /** - * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed + * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed * @pf: board private structure **/ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) @@ -10623,7 +10624,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * need to rebuild the switch model in the HW. * * If there were VEBs but the reconstitution failed, we'll try - * try to recover minimal use by getting the basic PF VSI working. + * to recover minimal use by getting the basic PF VSI working. */ if (vsi->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); @@ -11039,6 +11040,11 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) return -ENODATA; } + if (is_kdump_kernel()) { + vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS; + vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS; + } + return 0; } @@ -15342,8 +15348,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_check_recovery_mode(pf); - hw->aq.num_arq_entries = I40E_AQ_LEN; - hw->aq.num_asq_entries = I40E_AQ_LEN; + if (is_kdump_kernel()) { + hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN; + hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN; + } else { + hw->aq.num_arq_entries = I40E_AQ_LEN; + hw->aq.num_asq_entries = I40E_AQ_LEN; + } hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; @@ -15506,6 +15517,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_switch_setup; + /* Reduce Tx and Rx pairs for kdump + * When MSI-X is enabled, it's not allowed to use more TC queue + * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus + * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. 
+ */ + if (is_kdump_kernel()) + pf->num_lan_msix = 1; + pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 7164f4ad8120..fe6dca846028 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -4,7 +4,7 @@ #include "i40e_prototype.h" /** - * i40e_init_nvm_ops - Initialize NVM function pointers + * i40e_init_nvm - Initialize NVM function pointers * @hw: pointer to the HW structure * * Setup the function pointers and the NVM info structure. Should be called diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 7a879614ca55..f1f6fc3744e9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -216,7 +216,7 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, } /** - * i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events + * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events * @pf: the PF data structure * * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 06b4271219b1..121cd99fdeff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3331,7 +3331,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * i40e_create_tx_ctx Build the Tx context descriptor + * i40e_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 @@ -3833,8 +3833,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * @frames: array of XDP buffer pointers * @flags: XDP extra info * - * Returns number of frames successfully sent. Frames that fail are - * free'ed via XDP return API. + * Returns number of frames successfully sent. Failed frames + * will be free'ed by XDP core. * * For error cases, a negative errno code is returned and no-frames * are transmitted (caller must handle freeing frames). 
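The updated kernel-doc just above describes the new ndo_xdp_xmit return convention adopted in this series: the driver stops at the first frame it cannot queue, returns only the count it actually sent (nxmit), and leaves freeing of the untransmitted tail to the XDP core instead of calling xdp_return_frame_rx_napi() itself. A minimal standalone sketch of that contract follows; it is an illustration only, and xmit_one() and model_xdp_xmit() are hypothetical stand-ins, not kernel functions.

    #include <stdio.h>

    /* Simplified stand-in for struct xdp_frame; illustration only. */
    struct xdp_frame { int id; };

    /* Hypothetical per-frame transmit helper: returns 0 on success. */
    static int xmit_one(struct xdp_frame *f)
    {
            return f->id < 0 ? -1 : 0;      /* pretend negative ids fail to queue */
    }

    /* Model of the new convention: stop at the first failure and report only
     * the number of frames actually queued; the caller (the XDP core in the
     * real kernel) is responsible for freeing the frames that were not sent.
     */
    static int model_xdp_xmit(struct xdp_frame **frames, int n)
    {
            int nxmit = 0;

            for (int i = 0; i < n; i++) {
                    if (xmit_one(frames[i]))
                            break;
                    nxmit++;
            }
            return nxmit;
    }

    int main(void)
    {
            struct xdp_frame a = { 1 }, b = { 2 }, c = { -3 }, d = { 4 };
            struct xdp_frame *batch[] = { &a, &b, &c, &d };

            /* Prints "queued 2 of 4": frames c and d are left for the caller. */
            printf("queued %d of 4\n", model_xdp_xmit(batch, 4));
            return 0;
    }

Returning the partial count rather than "n - drops" keeps the frame accounting in one place and matches the i40e hunks that follow, where the per-frame xdp_return_frame_rx_napi() call and the drops counter are removed.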
@@ -3847,7 +3847,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *xdp_ring; - int drops = 0; + int nxmit = 0; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) @@ -3867,14 +3867,13 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int err; err = i40e_xmit_xdp_ring(xdpf, xdp_ring); - if (err != I40E_XDP_TX) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err != I40E_XDP_TX) + break; + nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) i40e_xdp_ring_update_tail(xdp_ring); - return n - drops; + return nxmit; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 5d301a466f5c..eff0a30790dd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -40,6 +40,66 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, } /** + * i40e_vc_link_speed2mbps + * converts i40e_aq_link_speed to integer value of Mbps + * @link_speed: the speed to convert + * + * return the speed as direct value of Mbps. + **/ +static u32 +i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed) +{ + switch (link_speed) { + case I40E_LINK_SPEED_100MB: + return SPEED_100; + case I40E_LINK_SPEED_1GB: + return SPEED_1000; + case I40E_LINK_SPEED_2_5GB: + return SPEED_2500; + case I40E_LINK_SPEED_5GB: + return SPEED_5000; + case I40E_LINK_SPEED_10GB: + return SPEED_10000; + case I40E_LINK_SPEED_20GB: + return SPEED_20000; + case I40E_LINK_SPEED_25GB: + return SPEED_25000; + case I40E_LINK_SPEED_40GB: + return SPEED_40000; + case I40E_LINK_SPEED_UNKNOWN: + return SPEED_UNKNOWN; + } + return SPEED_UNKNOWN; +} + +/** + * i40e_set_vf_link_state + * @vf: pointer to the VF structure + * @pfe: pointer to PF event structure + * @ls: pointer to link status structure + * + * set a link state on a single vf + **/ +static void i40e_set_vf_link_state(struct i40e_vf *vf, + struct virtchnl_pf_event *pfe, struct i40e_link_status *ls) +{ + u8 link_status = ls->link_info & I40E_AQ_LINK_UP; + + if (vf->link_forced) + link_status = vf->link_up; + + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + pfe->event_data.link_event_adv.link_speed = link_status ? + i40e_vc_link_speed2mbps(ls->link_speed) : 0; + pfe->event_data.link_event_adv.link_status = link_status; + } else { + pfe->event_data.link_event.link_speed = link_status ? + i40e_virtchnl_link_speed(ls->link_speed) : 0; + pfe->event_data.link_event.link_status = link_status; + } +} + +/** * i40e_vc_notify_vf_link_state * @vf: pointer to the VF structure * @@ -55,16 +115,9 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) { - pfe.event_data.link_event.link_status = vf->link_up; - pfe.event_data.link_event.link_speed = - (vf->link_up ? 
i40e_virtchnl_link_speed(ls->link_speed) : 0); - } else { - pfe.event_data.link_event.link_status = - ls->link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = - i40e_virtchnl_link_speed(ls->link_speed); - } + + i40e_set_vf_link_state(vf, &pfe, ls); + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -1949,6 +2002,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; @@ -3696,26 +3750,8 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) } /* get link speed in MB to validate rate limit */ - switch (ls->link_speed) { - case VIRTCHNL_LINK_SPEED_100MB: - speed = SPEED_100; - break; - case VIRTCHNL_LINK_SPEED_1GB: - speed = SPEED_1000; - break; - case VIRTCHNL_LINK_SPEED_10GB: - speed = SPEED_10000; - break; - case VIRTCHNL_LINK_SPEED_20GB: - speed = SPEED_20000; - break; - case VIRTCHNL_LINK_SPEED_25GB: - speed = SPEED_25000; - break; - case VIRTCHNL_LINK_SPEED_40GB: - speed = SPEED_40000; - break; - default: + speed = i40e_vc_link_speed2mbps(ls->link_speed); + if (speed == SPEED_UNKNOWN) { dev_err(&pf->pdev->dev, "Cannot detect link speed\n"); aq_ret = I40E_ERR_PARAM; @@ -4464,23 +4500,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) switch (link) { case IFLA_VF_LINK_STATE_AUTO: vf->link_forced = false; - pfe.event_data.link_event.link_status = - pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = - (enum virtchnl_link_speed) - pf->hw.phy.link_info.link_speed; + i40e_set_vf_link_state(vf, &pfe, ls); break; case IFLA_VF_LINK_STATE_ENABLE: vf->link_forced = true; vf->link_up = true; - pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); + i40e_set_vf_link_state(vf, &pfe, ls); break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; vf->link_up = false; - pfe.event_data.link_event.link_status = false; - pfe.event_data.link_event.link_speed = 0; + i40e_set_vf_link_state(vf, &pfe, ls); break; default: ret = -EINVAL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 12ca84113587..46d884417c63 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -160,6 +160,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; + rcu_read_unlock(); + return result; + } + switch (act) { case XDP_PASS: break; @@ -167,10 +174,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? 
I40E_XDP_REDIR : I40E_XDP_CONSUMED; - break; default: bpf_warn_invalid_xdp_action(act); fallthrough; @@ -625,7 +628,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) } /** - * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown + * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown * @tx_ring: XDP Tx ring **/ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index c997063ed728..9c3e45c54d01 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -11,5 +11,6 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_IAVF) += iavf.o -iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ +iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ + iavf_adv_rss.o \ iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 8a65525a7c0d..e8bd04100ecd 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -37,6 +37,8 @@ #include "iavf_type.h" #include <linux/avf/virtchnl.h> #include "iavf_txrx.h" +#include "iavf_fdir.h" +#include "iavf_adv_rss.h" #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " @@ -300,6 +302,10 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) #define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) #define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) +#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25) +#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) +#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27) +#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28) /* OS defined structs */ struct net_device *netdev; @@ -340,6 +346,10 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_VLAN) #define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_CAP_ADV_LINK_SPEED) +#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_FDIR_PF) +#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; @@ -362,6 +372,14 @@ struct iavf_adapter { /* lock to protect access to the cloud filter list */ spinlock_t cloud_filter_list_lock; u16 num_cloud_filters; + +#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */ + u16 fdir_active_fltr; + struct list_head fdir_list_head; + spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */ + + struct list_head adv_rss_list_head; + spinlock_t adv_rss_lock; /* protect the RSS management list */ }; @@ -432,6 +450,10 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +void iavf_add_fdir_filter(struct iavf_adapter *adapter); +void iavf_del_fdir_filter(struct iavf_adapter *adapter); +void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); +void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c new file mode 100644 index 000000000000..6edbf134b73f --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Intel Corporation. */ + +/* advanced RSS configuration ethtool support for iavf */ + +#include "iavf.h" + +/** + * iavf_fill_adv_rss_ip4_hdr - fill the IPv4 RSS protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the RSS configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_SA) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_DA) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); +} + +/** + * iavf_fill_adv_rss_ip6_hdr - fill the IPv6 RSS protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the RSS configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_SA) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_DA) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); +} + +/** + * iavf_fill_adv_rss_tcp_hdr - fill the TCP RSS protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the RSS configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); +} + +/** + * iavf_fill_adv_rss_udp_hdr - fill the UDP RSS protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the RSS configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, 
UDP); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT); +} + +/** + * iavf_fill_adv_rss_sctp_hdr - fill the SCTP RSS protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the RSS configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); + + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT) + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); +} + +/** + * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message + * @rss_cfg: the virtchnl message to be filled with RSS configuration setting + * @packet_hdrs: the RSS configuration protocol header types + * @hash_flds: the RSS configuration protocol hash fields + * + * Returns 0 if the RSS configuration virtchnl message is filled successfully + */ +int +iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, + u32 packet_hdrs, u64 hash_flds) +{ + struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; + struct virtchnl_proto_hdr *hdr; + + rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + + proto_hdrs->tunnel_level = 0; /* always outer layer */ + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: + iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: + iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); + break; + default: + return -EINVAL; + } + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: + iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: + iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: + iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * iavf_find_adv_rss_cfg_by_hdrs - find RSS configuration with header type + * @adapter: pointer to the VF adapter structure + * @packet_hdrs: protocol header type to find. 
+ * + * Returns pointer to advance RSS configuration if found or null + */ +struct iavf_adv_rss * +iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs) +{ + struct iavf_adv_rss *rss; + + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) + if (rss->packet_hdrs == packet_hdrs) + return rss; + + return NULL; +} + +/** + * iavf_print_adv_rss_cfg + * @adapter: pointer to the VF adapter structure + * @rss: pointer to the advance RSS configuration to print + * @action: the string description about how to handle the RSS + * @result: the string description about the virtchnl result + * + * Print the advance RSS configuration + **/ +void +iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, + const char *action, const char *result) +{ + u32 packet_hdrs = rss->packet_hdrs; + u64 hash_flds = rss->hash_flds; + static char hash_opt[300]; + const char *proto; + + if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_TCP) + proto = "TCP"; + else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_UDP) + proto = "UDP"; + else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) + proto = "SCTP"; + else + return; + + memset(hash_opt, 0, sizeof(hash_opt)); + + strcat(hash_opt, proto); + if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4) + strcat(hash_opt, "v4 "); + else + strcat(hash_opt, "v6 "); + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA | + IAVF_ADV_RSS_HASH_FLD_IPV6_SA)) + strcat(hash_opt, "IP SA,"); + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA | + IAVF_ADV_RSS_HASH_FLD_IPV6_DA)) + strcat(hash_opt, "IP DA,"); + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT | + IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT | + IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)) + strcat(hash_opt, "src port,"); + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT | + IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | + IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) + strcat(hash_opt, "dst port,"); + + if (!action) + action = ""; + + if (!result) + result = ""; + + dev_info(&adapter->pdev->dev, "%s %s %s\n", action, hash_opt, result); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h new file mode 100644 index 000000000000..4d3be11af7aa --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. 
*/ + +#ifndef _IAVF_ADV_RSS_H_ +#define _IAVF_ADV_RSS_H_ + +struct iavf_adapter; + +/* State of advanced RSS configuration */ +enum iavf_adv_rss_state_t { + IAVF_ADV_RSS_ADD_REQUEST, /* User requests to add RSS */ + IAVF_ADV_RSS_ADD_PENDING, /* RSS pending add by the PF */ + IAVF_ADV_RSS_DEL_REQUEST, /* Driver requests to delete RSS */ + IAVF_ADV_RSS_DEL_PENDING, /* RSS pending delete by the PF */ + IAVF_ADV_RSS_ACTIVE, /* RSS configuration is active */ +}; + +enum iavf_adv_rss_flow_seg_hdr { + IAVF_ADV_RSS_FLOW_SEG_HDR_NONE = 0x00000000, + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 = 0x00000001, + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6 = 0x00000002, + IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004, + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008, + IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010, +}; + +#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \ + (IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6) + +#define IAVF_ADV_RSS_FLOW_SEG_HDR_L4 \ + (IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) + +enum iavf_adv_rss_flow_field { + /* L3 */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA, + IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA, + IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA, + IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA, + /* L4 */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT, + IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT, + IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT, + IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT, + IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT, + IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT, + + /* The total number of enums must not exceed 64 */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX +}; + +#define IAVF_ADV_RSS_HASH_INVALID 0 +#define IAVF_ADV_RSS_HASH_FLD_IPV4_SA \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA) +#define IAVF_ADV_RSS_HASH_FLD_IPV6_SA \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA) +#define IAVF_ADV_RSS_HASH_FLD_IPV4_DA \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA) +#define IAVF_ADV_RSS_HASH_FLD_IPV6_DA \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA) +#define IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT) +#define IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT) +#define IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT) +#define IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT) +#define IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT) +#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT) + +/* bookkeeping of advanced RSS configuration */ +struct iavf_adv_rss { + enum iavf_adv_rss_state_t state; + struct list_head list; + + u32 packet_hdrs; + u64 hash_flds; + + struct virtchnl_rss_cfg cfg_msg; +}; + +int +iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, + u32 packet_hdrs, u64 hash_flds); +struct iavf_adv_rss * +iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs); +void +iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, + const char *action, const char *result); +#endif /* _IAVF_ADV_RSS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index c93567f4d0f7..af43fbd8cb75 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -828,6 +828,872 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, } /** + * 
iavf_fltr_to_ethtool_flow - convert filter type values to ethtool + * flow type values + * @flow: filter type to be converted + * + * Returns the corresponding ethtool flow type. + */ +static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow) +{ + switch (flow) { + case IAVF_FDIR_FLOW_IPV4_TCP: + return TCP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_UDP: + return UDP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_SCTP: + return SCTP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_AH: + return AH_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_ESP: + return ESP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_OTHER: + return IPV4_USER_FLOW; + case IAVF_FDIR_FLOW_IPV6_TCP: + return TCP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_UDP: + return UDP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_SCTP: + return SCTP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_AH: + return AH_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_ESP: + return ESP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_OTHER: + return IPV6_USER_FLOW; + case IAVF_FDIR_FLOW_NON_IP_L2: + return ETHER_FLOW; + default: + /* 0 is undefined ethtool flow */ + return 0; + } +} + +/** + * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum + * @eth: Ethtool flow type to be converted + * + * Returns flow enum + */ +static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth) +{ + switch (eth) { + case TCP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_TCP; + case UDP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_UDP; + case SCTP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_SCTP; + case AH_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_AH; + case ESP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_ESP; + case IPV4_USER_FLOW: + return IAVF_FDIR_FLOW_IPV4_OTHER; + case TCP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_TCP; + case UDP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_UDP; + case SCTP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_SCTP; + case AH_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_AH; + case ESP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_ESP; + case IPV6_USER_FLOW: + return IAVF_FDIR_FLOW_IPV6_OTHER; + case ETHER_FLOW: + return IAVF_FDIR_FLOW_NON_IP_L2; + default: + return IAVF_FDIR_FLOW_NONE; + } +} + +/** + * iavf_is_mask_valid - check mask field set + * @mask: full mask to check + * @field: field for which mask should be valid + * + * If the mask is fully set return true. If it is not valid for field return + * false. + */ +static bool iavf_is_mask_valid(u64 mask, u64 field) +{ + return (mask & field) == field; +} + +/** + * iavf_parse_rx_flow_user_data - deconstruct user-defined data + * @fsp: pointer to ethtool Rx flow specification + * @fltr: pointer to Flow Director filter for userdef data storage + * + * Returns 0 on success, negative error value on failure + */ +static int +iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + struct iavf_flex_word *flex; + int i, cnt = 0; + + if (!(fsp->flow_type & FLOW_EXT)) + return 0; + + for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) { +#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0) +#define IAVF_USERDEF_FLEX_OFFS_S 16 +#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S) +#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0) + u32 value = be32_to_cpu(fsp->h_ext.data[i]); + u32 mask = be32_to_cpu(fsp->m_ext.data[i]); + + if (!value || !mask) + continue; + + if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M)) + return -EINVAL; + + /* 504 is the maximum value for offsets, and offset is measured + * from the start of the MAC address. 
+ */ +#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504 + flex = &fltr->flex_words[cnt++]; + flex->word = value & IAVF_USERDEF_FLEX_WORD_M; + flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >> + IAVF_USERDEF_FLEX_OFFS_S; + if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL) + return -EINVAL; + } + + fltr->flex_cnt = cnt; + + return 0; +} + +/** + * iavf_fill_rx_flow_ext_data - fill the additional data + * @fsp: pointer to ethtool Rx flow specification + * @fltr: pointer to Flow Director filter to get additional data + */ +static void +iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1]) + return; + + fsp->flow_type |= FLOW_EXT; + + memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data)); +} + +/** + * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data + * @adapter: the VF adapter structure that contains filter list + * @cmd: ethtool command data structure to receive the filter data + * + * Returns 0 as expected for success by ethtool + */ +static int +iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *rule = NULL; + int ret = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + spin_lock_bh(&adapter->fdir_fltr_lock); + + rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (!rule) { + ret = -EINVAL; + goto release_lock; + } + + fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type); + + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port; + fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos; + fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi; + fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos; + fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi; + fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos; + break; + case IPV4_USER_FLOW: + fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header; + fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto; + fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header; + fsp->m_u.usr_ip4_spec.tos = 
rule->ip_mask.tos; + fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; + fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port; + fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi; + fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass; + memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi; + fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass; + break; + case IPV6_USER_FLOW: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header; + fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass; + fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header; + fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass; + fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto; + break; + case ETHER_FLOW: + fsp->h_u.ether_spec.h_proto = rule->eth_data.etype; + fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype; + break; + default: + ret = -EINVAL; + break; + } + + iavf_fill_rx_flow_ext_data(fsp, rule); + + if (rule->action == VIRTCHNL_ACTION_DROP) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->q_index; + +release_lock: + spin_unlock_bh(&adapter->fdir_fltr_lock); + return ret; +} + +/** + * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters + * @adapter: the VF adapter structure containing the filter list + * @cmd: ethtool command data structure + * @rule_locs: ethtool array passed in from OS to receive filter IDs + * + * Returns 0 as expected for success by ethtool + */ +static int +iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct iavf_fdir_fltr *fltr; + unsigned int cnt = 0; + int val = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + cmd->data = IAVF_MAX_FDIR_FILTERS; + + spin_lock_bh(&adapter->fdir_fltr_lock); + + list_for_each_entry(fltr, &adapter->fdir_list_head, list) { + if (cnt == cmd->rule_cnt) { + val = -EMSGSIZE; + goto release_lock; + } + 
rule_locs[cnt] = fltr->loc; + cnt++; + } + +release_lock: + spin_unlock_bh(&adapter->fdir_fltr_lock); + if (!val) + cmd->rule_cnt = cnt; + + return val; +} + +/** + * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter + * @adapter: pointer to the VF adapter structure + * @fsp: pointer to ethtool Rx flow specification + * @fltr: filter structure + */ +static int +iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + u32 flow_type, q_index = 0; + enum virtchnl_action act; + int err; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + act = VIRTCHNL_ACTION_DROP; + } else { + q_index = fsp->ring_cookie; + if (q_index >= adapter->num_active_queues) + return -EINVAL; + + act = VIRTCHNL_ACTION_QUEUE; + } + + fltr->action = act; + fltr->loc = fsp->location; + fltr->q_index = q_index; + + if (fsp->flow_type & FLOW_EXT) { + memcpy(fltr->ext_data.usr_def, fsp->h_ext.data, + sizeof(fltr->ext_data.usr_def)); + memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data, + sizeof(fltr->ext_mask.usr_def)); + } + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type); + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc; + fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; + fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst; + fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi; + fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; + fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; + fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + break; + case IPV4_USER_FLOW: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; + fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; + fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos; + fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; + fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; + fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; + fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc; + fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, 
fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi; + fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; + fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + break; + case IPV6_USER_FLOW: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; + fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass; + fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; + fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; + fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + break; + case ETHER_FLOW: + fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto; + fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto; + break; + default: + /* not doing un-parsed flow types */ + return -EINVAL; + } + + if (iavf_fdir_is_dup_fltr(adapter, fltr)) + return -EEXIST; + + err = iavf_parse_rx_flow_user_data(fsp, fltr); + if (err) + return err; + + return iavf_fill_fdir_add_msg(adapter, fltr); +} + +/** + * iavf_add_fdir_ethtool - add Flow Director filter + * @adapter: pointer to the VF adapter structure + * @cmd: command to add Flow Director filter + * + * Returns 0 on success and negative values for failure + */ +static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct iavf_fdir_fltr *fltr; + int count = 50; + int err; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + if (fsp->flow_type & FLOW_MAC_EXT) + return -EINVAL; + + if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { + dev_err(&adapter->pdev->dev, + "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", + IAVF_MAX_FDIR_FILTERS); + return -ENOSPC; + } + + spin_lock_bh(&adapter->fdir_fltr_lock); + if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); + spin_unlock_bh(&adapter->fdir_fltr_lock); + return -EEXIST; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) + return -ENOMEM; + + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + if (--count == 0) { + kfree(fltr); + return -EINVAL; + } + udelay(1); + } + + err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); + if (err) + goto ret; + + spin_lock_bh(&adapter->fdir_fltr_lock); + iavf_fdir_list_add_fltr(adapter, fltr); + 
adapter->fdir_active_fltr++; + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + spin_unlock_bh(&adapter->fdir_fltr_lock); + + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + +ret: + if (err && fltr) + kfree(fltr); + + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + return err; +} + +/** + * iavf_del_fdir_ethtool - delete Flow Director filter + * @adapter: pointer to the VF adapter structure + * @cmd: command to delete Flow Director filter + * + * Returns 0 on success and negative values for failure + */ +static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *fltr = NULL; + int err = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + spin_lock_bh(&adapter->fdir_fltr_lock); + fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (fltr) { + if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { + fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + } else { + err = -EBUSY; + } + } else if (adapter->fdir_active_fltr) { + err = -EINVAL; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + return err; +} + +/** + * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input + * @cmd: ethtool rxnfc command + * + * This function parses the rxnfc command and returns intended + * header types for RSS configuration + */ +static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) +{ + u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case UDP_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case SCTP_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case TCP_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case UDP_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case SCTP_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + default: + break; + } + + return hdrs; +} + +/** + * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input + * @cmd: ethtool rxnfc command + * + * This function parses the rxnfc command and returns intended hash fields for + * RSS configuration + */ +static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd) +{ + u64 hfld = IAVF_ADV_RSS_HASH_INVALID; + + if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) { + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + if (cmd->data & RXH_IP_SRC) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA; + if (cmd->data & RXH_IP_DST) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + if (cmd->data & RXH_IP_SRC) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA; + if (cmd->data & RXH_IP_DST) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA; + break; + default: + break; + } + } + + if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) { + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (cmd->data & RXH_L4_B_0_1) + hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT; + if (cmd->data & 
RXH_L4_B_2_3) + hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (cmd->data & RXH_L4_B_0_1) + hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT; + if (cmd->data & RXH_L4_B_2_3) + hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (cmd->data & RXH_L4_B_0_1) + hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT; + if (cmd->data & RXH_L4_B_2_3) + hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT; + break; + default: + break; + } + } + + return hfld; +} + +/** + * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash + * @adapter: pointer to the VF adapter structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. + */ +static int +iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct iavf_adv_rss *rss_old, *rss_new; + bool rss_new_add = false; + int count = 50, err = 0; + u64 hash_flds; + u32 hdrs; + + if (!ADV_RSS_SUPPORT(adapter)) + return -EOPNOTSUPP; + + hdrs = iavf_adv_rss_parse_hdrs(cmd); + if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) + return -EINVAL; + + hash_flds = iavf_adv_rss_parse_hash_flds(cmd); + if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) + return -EINVAL; + + rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL); + if (!rss_new) + return -ENOMEM; + + if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) { + kfree(rss_new); + return -EINVAL; + } + + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + if (--count == 0) { + kfree(rss_new); + return -EINVAL; + } + + udelay(1); + } + + spin_lock_bh(&adapter->adv_rss_lock); + rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); + if (rss_old) { + if (rss_old->state != IAVF_ADV_RSS_ACTIVE) { + err = -EBUSY; + } else if (rss_old->hash_flds != hash_flds) { + rss_old->state = IAVF_ADV_RSS_ADD_REQUEST; + rss_old->hash_flds = hash_flds; + memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg, + sizeof(rss_new->cfg_msg)); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; + } else { + err = -EEXIST; + } + } else { + rss_new_add = true; + rss_new->state = IAVF_ADV_RSS_ADD_REQUEST; + rss_new->packet_hdrs = hdrs; + rss_new->hash_flds = hash_flds; + list_add_tail(&rss_new->list, &adapter->adv_rss_list_head); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (!err) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + + if (!rss_new_add) + kfree(rss_new); + + return err; +} + +/** + * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type + * @adapter: pointer to the VF adapter structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. 
+ */ +static int +iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct iavf_adv_rss *rss; + u64 hash_flds; + u32 hdrs; + + if (!ADV_RSS_SUPPORT(adapter)) + return -EOPNOTSUPP; + + cmd->data = 0; + + hdrs = iavf_adv_rss_parse_hdrs(cmd); + if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) + return -EINVAL; + + spin_lock_bh(&adapter->adv_rss_lock); + rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); + if (rss) + hash_flds = rss->hash_flds; + else + hash_flds = IAVF_ADV_RSS_HASH_INVALID; + spin_unlock_bh(&adapter->adv_rss_lock); + + if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) + return -EINVAL; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA | + IAVF_ADV_RSS_HASH_FLD_IPV6_SA)) + cmd->data |= (u64)RXH_IP_SRC; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA | + IAVF_ADV_RSS_HASH_FLD_IPV6_DA)) + cmd->data |= (u64)RXH_IP_DST; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT | + IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT | + IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)) + cmd->data |= (u64)RXH_L4_B_0_1; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT | + IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | + IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) + cmd->data |= (u64)RXH_L4_B_2_3; + + return 0; +} + +/** + * iavf_set_rxnfc - command to set Rx flow rules. + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns 0 for success and negative values for errors + */ +static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = iavf_add_fdir_ethtool(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = iavf_del_fdir_ethtool(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = iavf_set_adv_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +/** * iavf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -846,9 +1712,21 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_active_queues; ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + if (!FDIR_FLTR_SUPPORT(adapter)) + break; + cmd->rule_cnt = adapter->fdir_active_fltr; + cmd->data = IAVF_MAX_FDIR_FILTERS; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = iavf_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); + break; case ETHTOOL_GRXFH: - netdev_info(netdev, - "RSS hash info is not available to vf, use pf.\n"); + ret = iavf_get_adv_rss_hash_opt(adapter, cmd); break; default: break; @@ -1025,6 +1903,7 @@ static const struct ethtool_ops iavf_ethtool_ops = { .set_coalesce = iavf_set_coalesce, .get_per_queue_coalesce = iavf_get_per_queue_coalesce, .set_per_queue_coalesce = iavf_set_per_queue_coalesce, + .set_rxnfc = iavf_set_rxnfc, .get_rxnfc = iavf_get_rxnfc, .get_rxfh_indir_size = iavf_get_rxfh_indir_size, .get_rxfh = iavf_get_rxfh, diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c new file mode 100644 index 000000000000..6146203efd84 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. 
*/ + +/* flow director ethtool support for iavf */ + +#include "iavf.h" + +#define GTPU_PORT 2152 +#define NAT_T_ESP_PORT 4500 +#define PFCP_PORT 8805 + +static const struct in6_addr ipv6_addr_full_mask = { + .in6_u = { + .u6_addr8 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } + } +}; + +/** + * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload + * @fltr: Flow Director filter data structure + */ +static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr) +{ + return sizeof(struct ethhdr) + + (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct udphdr); +} + +/** + * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the GTP-U protocol header is set successfully + */ +static int +iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */ + u16 adj_offs, hdr_offs; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_GTPU_HDR_TEID_OFFS0 4 +#define IAVF_GTPU_HDR_TEID_OFFS1 6 +#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10 +#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK 0x00FF /* skip N_PDU */ +/* PDU Session Container Extension Header (PSC) */ +#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85 +#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13 +#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK 0x3F /* skip Type */ +#define IAVF_GTPU_EH_QFI_IDX 1 + + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_GTPU_HDR_TEID_OFFS0: + case IAVF_GTPU_HDR_TEID_OFFS1: { + __be16 *pay_word = (__be16 *)ghdr->buffer; + + pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID); + } + break; + case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS: + if ((fltr->flex_words[i].word & + IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) != + IAVF_GTPU_PSC_EXTHDR_TYPE) + return -EOPNOTSUPP; + if (!ehdr) + ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH); + break; + case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS: + if (!ehdr) + return -EINVAL; + ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] = + fltr->flex_words[i].word & + IAVF_GTPU_HDR_PSC_PDU_QFI_MASK; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI); + break; + default: + return -EINVAL; + } + } + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the PFCP protocol header is set successfully + */ +static int +iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP); + + 
adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0 + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS: + hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD); + break; + default: + return -EINVAL; + } + } + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the NAT-T-ESP protocol header is set successfully + */ +static int +iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + u32 spi = 0; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_NAT_T_ESP_SPI_OFFS0 0 +#define IAVF_NAT_T_ESP_SPI_OFFS1 2 + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_NAT_T_ESP_SPI_OFFS0: + spi |= fltr->flex_words[i].word << 16; + break; + case IAVF_NAT_T_ESP_SPI_OFFS1: + spi |= fltr->flex_words[i].word; + break; + default: + return -EINVAL; + } + } + + if (!spi) + return -EOPNOTSUPP; /* Not support IKE Header Format with SPI 0 */ + + *(__be32 *)hdr->buffer = htonl(spi); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the UDP payload defined protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + int err; + + switch (ntohs(fltr->ip_data.dst_port)) { + case GTPU_PORT: + err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs); + break; + case NAT_T_ESP_PORT: + err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs); + break; + case PFCP_PORT: + err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/** + * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the IPv4 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct iphdr *iph = (struct iphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); + + if (fltr->ip_mask.tos == U8_MAX) { + iph->tos = fltr->ip_data.tos; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP); + } + + if (fltr->ip_mask.proto == U8_MAX) { + iph->protocol = fltr->ip_data.proto; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT); + } + + if 
(fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) { + iph->saddr = fltr->ip_data.v4_addrs.src_ip; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); + } + + if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) { + iph->daddr = fltr->ip_data.v4_addrs.dst_ip; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); + } + + fltr->ip_ver = 4; + + return 0; +} + +/** + * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the IPv6 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); + + if (fltr->ip_mask.tclass == U8_MAX) { + iph->priority = (fltr->ip_data.tclass >> 4) & 0xF; + iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC); + } + + if (fltr->ip_mask.proto == U8_MAX) { + iph->nexthdr = fltr->ip_data.proto; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); + } + + fltr->ip_ver = 6; + + return 0; +} + +/** + * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the TCP protocol header is set successfully + */ +static int +iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct tcphdr *tcph = (struct tcphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + tcph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + tcph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); + } + + return 0; +} + +/** + * iavf_fill_fdir_udp_hdr - fill the UDP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the UDP protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct udphdr *udph = (struct udphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + udph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + udph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT); + } + + if (!fltr->flex_cnt) + return 0; + + return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs); +} + +/** + * 
iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the SCTP protocol header is set successfully + */ +static int +iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct sctphdr *sctph = (struct sctphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + sctph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + sctph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); + } + + return 0; +} + +/** + * iavf_fill_fdir_ah_hdr - fill the AH protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the AH protocol header is set successfully + */ +static int +iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH); + + if (fltr->ip_mask.spi == htonl(U32_MAX)) { + ah->spi = fltr->ip_data.spi; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI); + } + + return 0; +} + +/** + * iavf_fill_fdir_esp_hdr - fill the ESP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the ESP protocol header is set successfully + */ +static int +iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + if (fltr->ip_mask.spi == htonl(U32_MAX)) { + esph->spi = fltr->ip_data.spi; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + } + + return 0; +} + +/** + * iavf_fill_fdir_l4_hdr - fill the L4 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the L4 protocol header is set successfully + */ +static int +iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr; + __be32 *l4_4_data; + + if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */ + return 0; + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + l4_4_data = (__be32 *)hdr->buffer; + + /* L2TPv3 over IP with 'Session ID' */ + if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) { + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID); + + *l4_4_data = fltr->ip_data.l4_header; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the Ethernet protocol header is set successfully + */ +static int +iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = 
&proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH); + + if (fltr->eth_mask.etype == htons(U16_MAX)) { + if (fltr->eth_data.etype == htons(ETH_P_IP) || + fltr->eth_data.etype == htons(ETH_P_IPV6)) + return -EOPNOTSUPP; + + ehdr->h_proto = fltr->eth_data.etype; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE); + } + + return 0; +} + +/** + * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if the add Flow Director virtchnl message is filled successfully + */ +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg; + struct virtchnl_proto_hdrs *proto_hdrs; + int err; + + proto_hdrs = &vc_msg->rule_cfg.proto_hdrs; + + err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */ + if (err) + return err; + + switch (fltr->flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_UDP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_SCTP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_AH: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_ESP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_OTHER: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_TCP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_UDP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_SCTP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_AH: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_ESP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_OTHER: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_NON_IP_L2: + break; + default: + err = -EINVAL; + break; + } + + if (err) + return err; + + vc_msg->vsi_id = adapter->vsi.id; + vc_msg->rule_cfg.action_set.count = 1; + vc_msg->rule_cfg.action_set.actions[0].type = fltr->action; + vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index; + + return 0; +} + +/** + * iavf_fdir_flow_proto_name - get the flow protocol name + * @flow_type: Flow Director filter flow type + **/ +static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type) +{ + switch (flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + case IAVF_FDIR_FLOW_IPV6_TCP: + return "TCP"; + case IAVF_FDIR_FLOW_IPV4_UDP: + case IAVF_FDIR_FLOW_IPV6_UDP: + return "UDP"; + case IAVF_FDIR_FLOW_IPV4_SCTP: + case IAVF_FDIR_FLOW_IPV6_SCTP: + return "SCTP"; + case 
IAVF_FDIR_FLOW_IPV4_AH: + case IAVF_FDIR_FLOW_IPV6_AH: + return "AH"; + case IAVF_FDIR_FLOW_IPV4_ESP: + case IAVF_FDIR_FLOW_IPV6_ESP: + return "ESP"; + case IAVF_FDIR_FLOW_IPV4_OTHER: + case IAVF_FDIR_FLOW_IPV6_OTHER: + return "Other"; + case IAVF_FDIR_FLOW_NON_IP_L2: + return "Ethernet"; + default: + return NULL; + } +} + +/** + * iavf_print_fdir_fltr + * @adapter: adapter structure + * @fltr: Flow Director filter to print + * + * Print the Flow Director filter + **/ +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type); + + if (!proto) + return; + + switch (fltr->flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + case IAVF_FDIR_FLOW_IPV4_UDP: + case IAVF_FDIR_FLOW_IPV4_SCTP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case IAVF_FDIR_FLOW_IPV4_AH: + case IAVF_FDIR_FLOW_IPV4_ESP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case IAVF_FDIR_FLOW_IPV4_OTHER: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + case IAVF_FDIR_FLOW_IPV6_TCP: + case IAVF_FDIR_FLOW_IPV6_UDP: + case IAVF_FDIR_FLOW_IPV6_SCTP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case IAVF_FDIR_FLOW_IPV6_AH: + case IAVF_FDIR_FLOW_IPV6_ESP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case IAVF_FDIR_FLOW_IPV6_OTHER: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + case IAVF_FDIR_FLOW_NON_IP_L2: + dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n", + fltr->loc, + ntohs(fltr->eth_data.etype)); + break; + default: + break; + } +} + +/** + * iavf_fdir_is_dup_fltr - test if filter is already in list + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns true if the filter is found in the list + */ +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *tmp; + + list_for_each_entry(tmp, &adapter->fdir_list_head, list) { + if (tmp->flow_type != fltr->flow_type) + continue; + + if (!memcmp(&tmp->eth_data, &fltr->eth_data, + sizeof(fltr->eth_data)) && + !memcmp(&tmp->ip_data, &fltr->ip_data, + sizeof(fltr->ip_data)) && + !memcmp(&tmp->ext_data, &fltr->ext_data, + sizeof(fltr->ext_data))) + return true; + } + + return false; +} + +/** + * iavf_find_fdir_fltr_by_loc - find filter with location + * @adapter: pointer to the VF adapter structure + * 
@loc: location to find. + * + * Returns pointer to Flow Director filter if found or null + */ +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) +{ + struct iavf_fdir_fltr *rule; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) + if (rule->loc == loc) + return rule; + + return NULL; +} + +/** + * iavf_fdir_list_add_fltr - add a new node to the flow director filter list + * @adapter: pointer to the VF adapter structure + * @fltr: filter node to add to structure + */ +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *rule, *parent = NULL; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) { + if (rule->loc >= fltr->loc) + break; + parent = rule; + } + + if (parent) + list_add(&fltr->list, &parent->list); + else + list_add(&fltr->list, &adapter->fdir_list_head); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h new file mode 100644 index 000000000000..33c55c366315 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _IAVF_FDIR_H_ +#define _IAVF_FDIR_H_ + +struct iavf_adapter; + +/* State of Flow Director filter */ +enum iavf_fdir_fltr_state_t { + IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */ + IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */ + IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */ + IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */ + IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */ +}; + +enum iavf_fdir_flow_type { + /* NONE - used for undef/error */ + IAVF_FDIR_FLOW_NONE = 0, + IAVF_FDIR_FLOW_IPV4_TCP, + IAVF_FDIR_FLOW_IPV4_UDP, + IAVF_FDIR_FLOW_IPV4_SCTP, + IAVF_FDIR_FLOW_IPV4_AH, + IAVF_FDIR_FLOW_IPV4_ESP, + IAVF_FDIR_FLOW_IPV4_OTHER, + IAVF_FDIR_FLOW_IPV6_TCP, + IAVF_FDIR_FLOW_IPV6_UDP, + IAVF_FDIR_FLOW_IPV6_SCTP, + IAVF_FDIR_FLOW_IPV6_AH, + IAVF_FDIR_FLOW_IPV6_ESP, + IAVF_FDIR_FLOW_IPV6_OTHER, + IAVF_FDIR_FLOW_NON_IP_L2, + /* MAX - this must be last and add anything new just above it */ + IAVF_FDIR_FLOW_PTYPE_MAX, +}; + +/* Must not exceed the array element number of '__be32 data[2]' in the ethtool + * 'struct ethtool_rx_flow_spec.m_ext.data[2]' to express the flex-byte (word). 
+ */ +#define IAVF_FLEX_WORD_NUM 2 + +struct iavf_flex_word { + u16 offset; + u16 word; +}; + +struct iavf_ipv4_addrs { + __be32 src_ip; + __be32 dst_ip; +}; + +struct iavf_ipv6_addrs { + struct in6_addr src_ip; + struct in6_addr dst_ip; +}; + +struct iavf_fdir_eth { + __be16 etype; +}; + +struct iavf_fdir_ip { + union { + struct iavf_ipv4_addrs v4_addrs; + struct iavf_ipv6_addrs v6_addrs; + }; + __be16 src_port; + __be16 dst_port; + __be32 l4_header; /* first 4 bytes of the layer 4 header */ + __be32 spi; /* security parameter index for AH/ESP */ + union { + u8 tos; + u8 tclass; + }; + u8 proto; +}; + +struct iavf_fdir_extra { + u32 usr_def[IAVF_FLEX_WORD_NUM]; +}; + +/* bookkeeping of Flow Director filters */ +struct iavf_fdir_fltr { + enum iavf_fdir_fltr_state_t state; + struct list_head list; + + enum iavf_fdir_flow_type flow_type; + + struct iavf_fdir_eth eth_data; + struct iavf_fdir_eth eth_mask; + + struct iavf_fdir_ip ip_data; + struct iavf_fdir_ip ip_mask; + + struct iavf_fdir_extra ext_data; + struct iavf_fdir_extra ext_mask; + + enum virtchnl_action action; + + /* flex byte filter data */ + u8 ip_ver; /* used to adjust the flex offset, 4 : IPv4, 6 : IPv6 */ + u8 flex_cnt; + struct iavf_flex_word flex_words[IAVF_FLEX_WORD_NUM]; + + u32 flow_id; + + u32 loc; /* Rule location inside the flow table */ + u32 q_index; + + struct virtchnl_fdir_add vc_add_msg; +}; + +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc); +#endif /* _IAVF_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index dc5b3c06d1e0..e612c24fa384 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -959,8 +959,10 @@ void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct iavf_vlan_filter *vlf; - struct iavf_mac_filter *f; struct iavf_cloud_filter *cf; + struct iavf_fdir_fltr *fdir; + struct iavf_mac_filter *f; + struct iavf_adv_rss *rss; if (adapter->state <= __IAVF_DOWN_PENDING) return; @@ -996,6 +998,19 @@ void iavf_down(struct iavf_adapter *adapter) } spin_unlock_bh(&adapter->cloud_filter_list_lock); + /* remove all Flow Director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + /* remove all advance RSS configuration */ + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) + rss->state = IAVF_ADV_RSS_DEL_REQUEST; + spin_unlock_bh(&adapter->adv_rss_lock); + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && adapter->state != __IAVF_RESETTING) { /* cancel any current operation */ @@ -1007,6 +1022,8 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= 
IAVF_FLAG_AQ_DISABLE_QUEUES; } @@ -1629,6 +1646,22 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_add_cloud_filter(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { + iavf_add_fdir_filter(adapter); + return IAVF_SUCCESS; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { + iavf_del_fdir_filter(adapter); + return IAVF_SUCCESS; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { + iavf_add_adv_rss_cfg(adapter); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { + iavf_del_adv_rss_cfg(adapter); + return 0; + } return -EAGAIN; } @@ -2529,7 +2562,7 @@ validate_bw: } /** - * iavf_validate_channel_config - validate queue mapping info + * iavf_validate_ch_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * @@ -3525,6 +3558,8 @@ int iavf_process_config(struct iavf_adapter *adapter) /* Enable cloud filter if ADQ is supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) hw_features |= NETIF_F_HW_TC; + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) + hw_features |= NETIF_F_GSO_UDP_L4; netdev->hw_features |= hw_features; @@ -3738,10 +3773,14 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->mac_vlan_list_lock); spin_lock_init(&adapter->cloud_filter_list_lock); + spin_lock_init(&adapter->fdir_fltr_lock); + spin_lock_init(&adapter->adv_rss_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); + INIT_LIST_HEAD(&adapter->fdir_list_head); + INIT_LIST_HEAD(&adapter->adv_rss_list_head); INIT_WORK(&adapter->reset_task, iavf_reset_task); INIT_WORK(&adapter->adminq_task, iavf_adminq_task); @@ -3845,7 +3884,9 @@ static void iavf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_adv_rss *rss, *rsstmp; struct iavf_mac_filter *f, *ftmp; struct iavf_cloud_filter *cf, *cftmp; struct iavf_hw *hw = &adapter->hw; @@ -3899,8 +3940,6 @@ static void iavf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); - iavf_free_all_tx_resources(adapter); - iavf_free_all_rx_resources(adapter); iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -3926,6 +3965,21 @@ static void iavf_remove(struct pci_dev *pdev) } spin_unlock_bh(&adapter->cloud_filter_list_lock); + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { + list_del(&fdir->list); + kfree(fdir); + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + list_del(&rss->list); + kfree(rss); + } + spin_unlock_bh(&adapter->adv_rss_lock); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index ffaf2742a2e0..3525eab8e9f9 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1905,13 +1905,20 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; - /* remove payload length from inner checksum 
*/ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); - /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + /* compute length of UDP segmentation header */ + *hdr_len = (u8)sizeof(l4.udp) + l4_offset; + } else { + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + /* compute length of TCP segmentation header */ + *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset); + } /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; @@ -2098,7 +2105,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * iavf_create_tx_ctx Build the Tx context descriptor + * iavf_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 647e7fde11b4..0eab3c43bdc5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -140,6 +140,9 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | + VIRTCHNL_VF_OFFLOAD_USO | + VIRTCHNL_VF_OFFLOAD_FDIR_PF | + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | VIRTCHNL_VF_CAP_ADV_LINK_SPEED; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; @@ -1005,7 +1008,7 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, } /** - * iavf_enable_channel + * iavf_enable_channels * @adapter: adapter structure * * Request that the PF enable channels as specified by @@ -1046,7 +1049,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter) } /** - * iavf_disable_channel + * iavf_disable_channels * @adapter: adapter structure * * Request that the PF disable channels that are configured @@ -1198,6 +1201,200 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter) } /** + * iavf_add_fdir_filter + * @adapter: the VF adapter structure + * + * Request that the PF add Flow Director filters as specified + * by the user via ethtool. 
+ **/ +void iavf_add_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_add *f; + bool process_fltr = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_add); + f = kzalloc(len, GFP_KERNEL); + if (!f) + return; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + process_fltr = true; + fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; + memcpy(f, &fdir->vc_add_msg, len); + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + /* prevent iavf_add_fdir_filter() from being called when there + * are no filters to add + */ + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; + kfree(f); + return; + } + adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); + kfree(f); +} + +/** + * iavf_del_fdir_filter + * @adapter: the VF adapter structure + * + * Request that the PF delete Flow Director filters as specified + * by the user via ethtool. + **/ +void iavf_del_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_del f; + bool process_fltr = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_del); + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { + process_fltr = true; + memset(&f, 0, len); + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); +} + +/** + * iavf_add_adv_rss_cfg + * @adapter: the VF adapter structure + * + * Request that the PF add RSS configuration as specified + * by the user via ethtool. 
+ **/ +void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_cfg *rss_cfg; + struct iavf_adv_rss *rss; + bool process_rss = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_rss_cfg); + rss_cfg = kzalloc(len, GFP_KERNEL); + if (!rss_cfg) + return; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { + process_rss = true; + rss->state = IAVF_ADV_RSS_ADD_PENDING; + memcpy(rss_cfg, &rss->cfg_msg, len); + iavf_print_adv_rss_cfg(adapter, rss, + "Input set change for", + "is pending"); + break; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (process_rss) { + adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, + (u8 *)rss_cfg, len); + } else { + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; + } + + kfree(rss_cfg); +} + +/** + * iavf_del_adv_rss_cfg + * @adapter: the VF adapter structure + * + * Request that the PF delete RSS configuration as specified + * by the user via ethtool. + **/ +void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_cfg *rss_cfg; + struct iavf_adv_rss *rss; + bool process_rss = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_rss_cfg); + rss_cfg = kzalloc(len, GFP_KERNEL); + if (!rss_cfg) + return; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) { + process_rss = true; + rss->state = IAVF_ADV_RSS_DEL_PENDING; + memcpy(rss_cfg, &rss->cfg_msg, len); + break; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (process_rss) { + adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG, + (u8 *)rss_cfg, len); + } else { + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; + } + + kfree(rss_cfg); +} + +/** * iavf_request_reset * @adapter: adapter structure * @@ -1357,6 +1554,84 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: { + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { + dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_fdir_fltr(adapter, fdir); + if (msglen) + dev_err(&adapter->pdev->dev, + "%s\n", msg); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: { + struct iavf_fdir_fltr *fdir; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + 
iavf_print_fdir_fltr(adapter, fdir); + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_ADD_RSS_CFG: { + struct iavf_adv_rss *rss, *rss_tmp; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rss_tmp, + &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { + iavf_print_adv_rss_cfg(adapter, rss, + "Failed to change the input set for", + NULL); + list_del(&rss->list); + kfree(rss); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_DEL_RSS_CFG: { + struct iavf_adv_rss *rss; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { + rss->state = IAVF_ADV_RSS_ACTIVE; + dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), @@ -1490,6 +1765,87 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: { + struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { + if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", + fdir->loc); + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + fdir->flow_id = add_fltr->flow_id; + } else { + dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", + add_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: { + struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", + fdir->loc); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", + del_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_ADD_RSS_CFG: { + struct iavf_adv_rss *rss; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { + iavf_print_adv_rss_cfg(adapter, rss, + "Input set change for", + "successful"); + rss->state = IAVF_ADV_RSS_ACTIVE; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_DEL_RSS_CFG: { + struct iavf_adv_rss *rss, *rss_tmp; + + 
spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rss_tmp, + &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { + list_del(&rss->list); + kfree(rss); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 73da4f71f530..07fe857e9e3a 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -26,7 +26,8 @@ ice-y := ice_main.o \ ice_fw_update.o \ ice_lag.o \ ice_ethtool.o -ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o +ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o +ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 17101c45cbcd..e35db3ff583b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -36,6 +36,7 @@ #include <linux/bpf.h> #include <linux/avf/virtchnl.h> #include <linux/cpu_rmap.h> +#include <linux/dim.h> #include <net/devlink.h> #include <net/ipv6.h> #include <net/xdp_sock.h> @@ -44,6 +45,9 @@ #include <net/gre.h> #include <net/udp_tunnel.h> #include <net/vxlan.h> +#if IS_ENABLED(CONFIG_DCB) +#include <scsi/iscsi_proto.h> +#endif /* CONFIG_DCB */ #include "ice_devids.h" #include "ice_type.h" #include "ice_txrx.h" @@ -73,7 +77,7 @@ #define ICE_MIN_LAN_TXRX_MSIX 1 #define ICE_MIN_LAN_OICR_MSIX 1 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) -#define ICE_FDIR_MSIX 1 +#define ICE_FDIR_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -84,9 +88,12 @@ #define ICE_MAX_LG_RSS_QS 256 #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) +/* All VF control VSIs share the same IRQ, so assign a unique ID for them */ +#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 +#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ #define ICE_MAX_RESET_WAIT 20 #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) @@ -190,53 +197,58 @@ struct ice_sw { u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */ }; -enum ice_state { - __ICE_TESTING, - __ICE_DOWN, - __ICE_NEEDS_RESTART, - __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ - __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - __ICE_PFR_REQ, /* set by driver and peers */ - __ICE_CORER_REQ, /* set by driver and peers */ - __ICE_GLOBR_REQ, /* set by driver and peers */ - __ICE_CORER_RECV, /* set by OICR handler */ - __ICE_GLOBR_RECV, /* set by OICR handler */ - __ICE_EMPR_RECV, /* set by OICR handler */ - __ICE_SUSPENDED, /* set on module remove path */ - __ICE_RESET_FAILED, /* set by reset/rebuild */ +enum ice_pf_state { + ICE_TESTING, + ICE_DOWN, + ICE_NEEDS_RESTART, + ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ + ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ + ICE_PFR_REQ, /* set by driver and peers */ + ICE_CORER_REQ, /* set by driver and peers */ + ICE_GLOBR_REQ, /* set by driver and peers */ + ICE_CORER_RECV, /* set by OICR handler 
*/ + ICE_GLOBR_RECV, /* set by OICR handler */ + ICE_EMPR_RECV, /* set by OICR handler */ + ICE_SUSPENDED, /* set on module remove path */ + ICE_RESET_FAILED, /* set by reset/rebuild */ /* When checking for the PF to be in a nominal operating state, the * bits that are grouped at the beginning of the list need to be - * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will + * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will * be checked. If you need to add a bit into consideration for nominal * operating state, it must be added before - * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position + * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position * without appropriate consideration. */ - __ICE_STATE_NOMINAL_CHECK_BITS, - __ICE_ADMINQ_EVENT_PENDING, - __ICE_MAILBOXQ_EVENT_PENDING, - __ICE_MDD_EVENT_PENDING, - __ICE_VFLR_EVENT_PENDING, - __ICE_FLTR_OVERFLOW_PROMISC, - __ICE_VF_DIS, - __ICE_CFG_BUSY, - __ICE_SERVICE_SCHED, - __ICE_SERVICE_DIS, - __ICE_FD_FLUSH_REQ, - __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ - __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ - __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ - __ICE_LINK_DEFAULT_OVERRIDE_PENDING, - __ICE_PHY_INIT_COMPLETE, - __ICE_STATE_NBITS /* must be last */ + ICE_STATE_NOMINAL_CHECK_BITS, + ICE_ADMINQ_EVENT_PENDING, + ICE_MAILBOXQ_EVENT_PENDING, + ICE_MDD_EVENT_PENDING, + ICE_VFLR_EVENT_PENDING, + ICE_FLTR_OVERFLOW_PROMISC, + ICE_VF_DIS, + ICE_CFG_BUSY, + ICE_SERVICE_SCHED, + ICE_SERVICE_DIS, + ICE_FD_FLUSH_REQ, + ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ + ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ + ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ + ICE_LINK_DEFAULT_OVERRIDE_PENDING, + ICE_PHY_INIT_COMPLETE, + ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ + ICE_STATE_NBITS /* must be last */ }; -enum ice_vsi_flags { - ICE_VSI_FLAG_UMAC_FLTR_CHANGED, - ICE_VSI_FLAG_MMAC_FLTR_CHANGED, - ICE_VSI_FLAG_VLAN_FLTR_CHANGED, - ICE_VSI_FLAG_PROMISC_CHANGED, - ICE_VSI_FLAG_NBITS /* must be last */ +enum ice_vsi_state { + ICE_VSI_DOWN, + ICE_VSI_NEEDS_RESTART, + ICE_VSI_NETDEV_ALLOCD, + ICE_VSI_NETDEV_REGISTERED, + ICE_VSI_UMAC_FLTR_CHANGED, + ICE_VSI_MMAC_FLTR_CHANGED, + ICE_VSI_VLAN_FLTR_CHANGED, + ICE_VSI_PROMISC_CHANGED, + ICE_VSI_STATE_NBITS /* must be last */ }; /* struct that defines a VSI, associated with a dev */ @@ -252,14 +264,12 @@ struct ice_vsi { irqreturn_t (*irq_handler)(int irq, void *data); u64 tx_linearize; - DECLARE_BITMAP(state, __ICE_STATE_NBITS); - DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS); + DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS); unsigned int current_netdev_flags; u32 tx_restart; u32 tx_busy; u32 rx_buf_failed; u32 rx_page_failed; - u32 rx_gro_dropped; u16 num_q_vectors; u16 base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; @@ -342,7 +352,7 @@ struct ice_q_vector { u16 reg_idx; u8 num_ring_rx; /* total number of Rx rings in vector */ u8 num_ring_tx; /* total number of Tx rings in vector */ - u8 itr_countdown; /* when 0 should adjust adaptive ITR */ + u8 wb_on_itr:1; /* if true, WB on ITR is enabled */ /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this * value to the device */ @@ -357,6 +367,8 @@ struct ice_q_vector { struct irq_affinity_notify affinity_notify; char name[ICE_INT_NAME_STR_LEN]; + + u16 total_events; /* net_dim(): number of interrupts processed */ } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { @@ 
-414,7 +426,8 @@ struct ice_pf { u16 num_msix_per_vf; /* used to ratelimit the MDD event logging */ unsigned long last_printed_mdd_jiffies; - DECLARE_BITMAP(state, __ICE_STATE_NBITS); + DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); + DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ @@ -499,7 +512,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | (itr << GLINT_DYN_CTL_ITR_INDX_S); if (vsi) - if (test_bit(__ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return; wr32(hw, GLINT_DYN_CTL(vector), val); } @@ -616,8 +629,10 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi); int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); -int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); +int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); +int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); +int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); +int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 80186589153b..5cdfe406af84 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -877,16 +877,18 @@ struct ice_aqc_get_phy_caps { __le16 param0; /* 18.0 - Report qualified modules */ #define ICE_AQC_GET_PHY_RQM BIT(0) - /* 18.1 - 18.2 : Report mode - * 00b - Report NVM capabilities - * 01b - Report topology capabilities - * 10b - Report SW configured + /* 18.1 - 18.3 : Report mode + * 000b - Report NVM capabilities + * 001b - Report topology capabilities + * 010b - Report SW configured + * 100b - Report default capabilities */ -#define ICE_AQC_REPORT_MODE_S 1 -#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) -#define ICE_AQC_REPORT_NVM_CAP 0 -#define ICE_AQC_REPORT_TOPO_CAP BIT(1) -#define ICE_AQC_REPORT_SW_CFG BIT(2) +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 +#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) +#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) +#define ICE_AQC_REPORT_DFLT_CFG BIT(3) __le32 reserved1; __le32 addr_high; __le32 addr_low; @@ -1407,8 +1409,7 @@ struct ice_aqc_nvm_comp_tbl { u8 cvs[]; /* Component Version String */ } __packed; -/* - * Send to PF command (indirect 0x0801) ID is only used by PF +/* Send to PF command (indirect 0x0801) ID is only used by PF * * Send to VF command (indirect 0x0802) ID is only used by PF * @@ -1790,6 +1791,7 @@ struct ice_pkg_ver { }; #define ICE_PKG_NAME_SIZE 32 +#define ICE_SEG_ID_SIZE 28 #define ICE_SEG_NAME_SIZE 28 struct ice_aqc_get_pkg_info { diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 6560acd76c94..88d98c9e5f91 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -581,8 +581,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) return; netdev = vsi->netdev; - if (!netdev || 
!netdev->rx_cpu_rmap || - netdev->reg_state != NETREG_REGISTERED) + if (!netdev || !netdev->rx_cpu_rmap) return; free_irq_cpu_rmap(netdev->rx_cpu_rmap); @@ -604,8 +603,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) pf = vsi->back; netdev = vsi->netdev; - if (!pf || !netdev || !vsi->num_q_vectors || - vsi->netdev->reg_state != NETREG_REGISTERED) + if (!pf || !netdev || !vsi->num_q_vectors) return -EINVAL; netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1148d768f8ed..5985a7e5ca8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -113,6 +113,9 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->v_idx = v_idx; q_vector->tx.itr_setting = ICE_DFLT_TX_ITR; q_vector->rx.itr_setting = ICE_DFLT_RX_ITR; + q_vector->tx.itr_mode = ITR_DYNAMIC; + q_vector->rx.itr_mode = ITR_DYNAMIC; + if (vsi->type == ICE_VSI_VF) goto out; /* only set affinity_mask if the CPU is online */ @@ -215,6 +218,26 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) } /** + * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring + * @ring: The Tx ring to configure + * + * This enables/disables XPS for a given Tx descriptor ring + * based on the TCs enabled for the VSI that ring belongs to. + */ +static void ice_cfg_xps_tx_ring(struct ice_ring *ring) +{ + if (!ring->q_vector || !ring->netdev) + return; + + /* We only initialize XPS once, so as not to overwrite user settings */ + if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state)) + return; + + netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, + ring->q_index); +} + +/** * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance * @ring: The Tx ring to configure * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized @@ -664,6 +687,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 pf_q; u8 tc; + /* Configure XPS */ + ice_cfg_xps_tx_ring(ring); + pf_q = ring->reg_idx; ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ @@ -717,25 +743,13 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) { ice_cfg_itr_gran(hw); - if (q_vector->num_ring_rx) { - struct ice_ring_container *rc = &q_vector->rx; - - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } + if (q_vector->num_ring_rx) + ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting); - if (q_vector->num_ring_tx) { - struct ice_ring_container *rc = &q_vector->tx; + if (q_vector->num_ring_tx) + ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } + ice_write_intrl(q_vector, q_vector->intrl); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index a20edf1538a0..e93b1e40f627 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -158,6 +158,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, return ICE_ERR_PARAM; hw = pi->hw; + if 
(report_mode == ICE_AQC_REPORT_DFLT_CFG && + !ice_fw_supports_report_dflt_cfg(hw)) + return ICE_ERR_PARAM; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); if (qual_mods) @@ -191,7 +195,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", pcaps->module_type[2]); - if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { + if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); memcpy(pi->phy.link_info.module_type, &pcaps->module_type, @@ -922,7 +926,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* Initialize port_info struct with PHY capabilities */ status = ice_aq_get_phy_caps(hw->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, + NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); if (status) dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n", @@ -1293,6 +1298,85 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { DEFINE_MUTEX(ice_global_cfg_lock_sw); /** + * ice_should_retry_sq_send_cmd + * @opcode: AQ opcode + * + * Decide if we should retry the send command routine for the ATQ, depending + * on the opcode. + */ +static bool ice_should_retry_sq_send_cmd(u16 opcode) +{ + switch (opcode) { + case ice_aqc_opc_get_link_topo: + case ice_aqc_opc_lldp_stop: + case ice_aqc_opc_lldp_start: + case ice_aqc_opc_lldp_filter_ctrl: + return true; + } + + return false; +} + +/** + * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * Retry sending the FW Admin Queue command, multiple times, to the FW Admin + * Queue if the EBUSY AQ error is returned. 
+ */ +static enum ice_status +ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc_cpy; + enum ice_status status; + bool is_cmd_for_retry; + u8 *buf_cpy = NULL; + u8 idx = 0; + u16 opcode; + + opcode = le16_to_cpu(desc->opcode); + is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); + memset(&desc_cpy, 0, sizeof(desc_cpy)); + + if (is_cmd_for_retry) { + if (buf) { + buf_cpy = kzalloc(buf_size, GFP_KERNEL); + if (!buf_cpy) + return ICE_ERR_NO_MEMORY; + } + + memcpy(&desc_cpy, desc, sizeof(desc_cpy)); + } + + do { + status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); + + if (!is_cmd_for_retry || !status || + hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) + break; + + if (buf_cpy) + memcpy(buf, buf_cpy, buf_size); + + memcpy(desc, &desc_cpy, sizeof(desc_cpy)); + + mdelay(ICE_SQ_SEND_DELAY_TIME_MS); + + } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); + + kfree(buf_cpy); + + return status; +} + +/** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct * @desc: descriptor describing the command @@ -1333,7 +1417,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, break; } - status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); if (lock_acquired) mutex_unlock(&ice_global_cfg_lock_sw); @@ -2655,7 +2739,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); @@ -2815,8 +2899,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) return ICE_ERR_NO_MEMORY; /* Get the current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + pcaps, NULL); if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_GET; goto out; @@ -2929,17 +3013,6 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, cfg->link_fec_opt = caps->link_fec_options; cfg->module_compliance_enforcement = caps->module_compliance_enforcement; - - if (ice_fw_supports_link_override(pi->hw)) { - struct ice_link_default_override_tlv tlv; - - if (ice_get_link_default_override(&tlv, pi)) - return; - - if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) - cfg->module_compliance_enforcement |= - ICE_LINK_OVERRIDE_STRICT_MODE; - } } /** @@ -2954,16 +3027,21 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, { struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; + struct ice_hw *hw; if (!pi || !cfg) return ICE_ERR_BAD_PTR; + hw = pi->hw; + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, + (ice_fw_supports_report_dflt_cfg(hw) ? 
+ ICE_AQC_REPORT_DFLT_CFG : + ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); if (status) goto out; @@ -3002,7 +3080,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, break; } - if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && + !ice_fw_supports_report_dflt_cfg(hw)) { struct ice_link_default_override_tlv tlv; if (ice_get_link_default_override(&tlv, pi)) @@ -3186,7 +3265,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; - desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF); + desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); cmd->lport_num = (u8)(lport & 0xff); cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & @@ -3206,23 +3285,33 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, /** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure - * @vsi_id: VSI FW index - * @lut_type: LUT table type - * @lut: pointer to the LUT buffer provided by the caller - * @lut_size: size of the LUT buffer - * @glob_lut_idx: global LUT index + * @params: RSS LUT parameters * @set: set true to set the table, false to get the table * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ static enum ice_status -__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, - u16 lut_size, u8 glob_lut_idx, bool set) +__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { + u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; enum ice_status status; - u16 flags = 0; + u8 *lut; + + if (!params) + return ICE_ERR_PARAM; + + vsi_handle = params->vsi_handle; + lut = params->lut; + + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + lut_size = params->lut_size; + lut_type = params->lut_type; + glob_lut_idx = params->global_lut_id; + vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); cmd_resp = &desc.params.get_set_rss_lut; @@ -3296,43 +3385,27 @@ ice_aq_get_set_rss_lut_exit: /** * ice_aq_get_rss_lut * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle - * @lut_type: LUT table type - * @lut: pointer to the LUT buffer provided by the caller - * @lut_size: size of the LUT buffer + * @get_params: RSS LUT parameters used to specify which RSS LUT to get * * get the RSS lookup table, PF or VSI type */ enum ice_status -ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, - u8 *lut, u16 lut_size) +ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { - if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) - return ICE_ERR_PARAM; - - return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), - lut_type, lut, lut_size, 0, false); + return __ice_aq_get_set_rss_lut(hw, get_params, false); } /** * ice_aq_set_rss_lut * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle - * @lut_type: LUT table type - * @lut: pointer to the LUT buffer provided by the caller - * @lut_size: size of the LUT buffer + * @set_params: RSS LUT parameters used to specify how to set the RSS LUT * * set the RSS lookup table, PF or VSI type */ enum ice_status -ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, - u8 *lut, u16 lut_size) 
+ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { - if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) - return ICE_ERR_PARAM; - - return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), - lut_type, lut, lut_size, 0, true); + return __ice_aq_get_set_rss_lut(hw, set_params, true); } /** @@ -4373,7 +4446,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, } /** - * ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl + * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl * @hw: pointer to HW struct */ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) @@ -4418,3 +4491,23 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } + +/** + * ice_fw_supports_report_dflt_cfg + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports report default configuration + */ +bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) +{ + if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && + hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) + return true; + } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + return true; + } + return false; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index baf4064fcbfe..7a9d2dfb21a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -11,6 +11,9 @@ #include "ice_switch.h" #include <linux/avf/virtchnl.h> +#define ICE_SQ_SEND_DELAY_TIME_MS 10 +#define ICE_SQ_SEND_MAX_EXECUTE 3 + enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); @@ -51,11 +54,9 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); enum ice_status -ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, - u16 lut_size); +ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); enum ice_status -ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, - u16 lut_size); +ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); enum ice_status ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); @@ -178,4 +179,5 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index b2d8a5932b1d..87b33bdd4960 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -892,7 +892,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * ice_sq_send_cmd - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue - * @desc: prefilled descriptor describing the command (non DMA mem) + * @desc: prefilled descriptor describing the command * @buf: buffer to use for indirect commands (or NULL for direct commands) * 
@buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure @@ -1097,6 +1097,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; + enum ice_aq_err rq_last_status; enum ice_status ret_code = 0; struct ice_aq_desc *desc; struct ice_dma_mem *bi; @@ -1130,13 +1131,12 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, desc = ICE_CTL_Q_DESC(cq->rq, ntc); desc_idx = ntc; - cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); + rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", - le16_to_cpu(desc->opcode), - cq->rq_last_status); + le16_to_cpu(desc->opcode), rq_last_status); } memcpy(&e->desc, desc, sizeof(e->desc)); datalen = le16_to_cpu(desc->datalen); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 68866f4f0eb0..fe75871e48ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -14,8 +14,8 @@ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) #define ICE_CTL_Q_DESC_UNUSED(R) \ - (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) + ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1)) /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. @@ -83,7 +83,6 @@ struct ice_rq_event_info { /* Control Queue information */ struct ice_ctl_q_info { enum ice_ctl_q qtype; - enum ice_aq_err rq_last_status; /* last status on receive queue */ struct ice_ctl_q_ring rq; /* receive queue */ struct ice_ctl_q_ring sq; /* send queue */ u32 sq_cmd_timeout; /* send queue cmd write back timeout */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 28e834a128c0..849fcf605479 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -804,7 +804,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S; ice_app_sel_type = ICE_APP_SEL_ETHTYPE; - ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE; + ice_app_prot_id_type = ETH_P_FCOE; } else if (i == 1) { /* iSCSI APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M; @@ -812,14 +812,14 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; ice_app_sel_type = ICE_APP_SEL_TCPIP; - ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; + ice_app_prot_id_type = ISCSI_LISTEN_PORT; for (j = 0; j < cmp_dcbcfg->numapps; j++) { u16 prot_id = cmp_dcbcfg->app[j].prot_id; u8 sel = cmp_dcbcfg->app[j].selector; if (sel == ICE_APP_SEL_TCPIP && - (prot_id == ICE_APP_PROT_ID_ISCSI || + (prot_id == ISCSI_LISTEN_PORT || prot_id == ICE_APP_PROT_ID_ISCSI_860)) { ice_app_prot_id_type = prot_id; break; @@ -832,7 +832,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S; ice_app_sel_type = 
ICE_APP_SEL_ETHTYPE; - ice_app_prot_id_type = ICE_APP_PROT_ID_FIP; + ice_app_prot_id_type = ETH_P_FIP; } status = (tlv_status & ice_aqc_cee_status_mask) >> @@ -857,7 +857,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, } /** - * ice_get_ieee_dcb_cfg + * ice_get_ieee_or_cee_dcb_cfg * @pi: port information structure * @dcbx_mode: mode of DCBX (IEEE or CEE) * diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 1e8f71ffc8ce..df02cffdf209 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -563,7 +563,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) dcbcfg->numapps = 1; dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE; dcbcfg->app[0].priority = 3; - dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE; + dcbcfg->app[0].prot_id = ETH_P_FCOE; ret = ice_pf_dcb_cfg(pf, dcbcfg, locked); kfree(dcbcfg); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 32ba71a16165..d9ddd0bcf65f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -60,7 +60,6 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = { ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), - ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped), ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), ICE_VSI_STAT("tx_linearize", tx_linearize), ICE_VSI_STAT("tx_busy", tx_busy), @@ -807,7 +806,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, if (eth_test->flags == ETH_TEST_FL_OFFLINE) { netdev_info(netdev, "offline testing starting\n"); - set_bit(__ICE_TESTING, pf->state); + set_bit(ICE_TESTING, pf->state); if (ice_active_vfs(pf)) { dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); @@ -817,7 +816,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, data[ICE_ETH_TEST_LOOP] = 1; data[ICE_ETH_TEST_LINK] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__ICE_TESTING, pf->state); + clear_bit(ICE_TESTING, pf->state); goto skip_ol_tests; } /* If the device is online then take it offline */ @@ -838,7 +837,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, data[ICE_ETH_TEST_REG]) eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__ICE_TESTING, pf->state); + clear_bit(ICE_TESTING, pf->state); if (if_running) { int status = ice_open(netdev); @@ -871,68 +870,47 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - char *p = (char *)data; unsigned int i; + u8 *p = data; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ICE_VSI_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - ice_gstrings_vsi_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ICE_VSI_STATS_LEN; i++) + ethtool_sprintf(&p, + ice_gstrings_vsi_stats[i].stat_string); ice_for_each_alloc_txq(vsi, i) { - snprintf(p, ETH_GSTRING_LEN, - "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); } ice_for_each_alloc_rxq(vsi, i) { - snprintf(p, 
ETH_GSTRING_LEN, - "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); } if (vsi->type != ICE_VSI_PF) return; - for (i = 0; i < ICE_PF_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - ice_gstrings_pf_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ICE_PF_STATS_LEN; i++) + ethtool_sprintf(&p, + ice_gstrings_pf_stats[i].stat_string); for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "tx_priority_%u_xon.nic", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "tx_priority_%u_xoff.nic", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i); + ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i); } for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "rx_priority_%u_xon.nic", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "rx_priority_%u_xoff.nic", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i); + ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i); } break; case ETH_SS_TEST: memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - ice_gstrings_priv_flags[i].name); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) + ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name); break; default: break; @@ -1081,7 +1059,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { err = -EAGAIN; @@ -1116,24 +1094,15 @@ static int ice_nway_reset(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - struct ice_port_info *pi; - enum ice_status status; + int err; - pi = vsi->port_info; /* If VSI state is up, then restart autoneg with link up */ - if (!test_bit(__ICE_DOWN, vsi->back->state)) - status = ice_aq_set_link_restart_an(pi, true, NULL); + if (!test_bit(ICE_DOWN, vsi->back->state)) + err = ice_set_link(vsi, true); else - status = ice_aq_set_link_restart_an(pi, false, NULL); + err = ice_set_link(vsi, false); - if (status) { - netdev_info(netdev, "link restart failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(pi->hw->adminq.sq_last_status)); - return -EIO; - } - - return 0; + return err; } /** @@ -1475,8 +1444,8 @@ void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low) do { \ if (req_speeds & (aq_link_speed) || \ (!req_speeds && \ - (adv_phy_type_lo & phy_type_mask_lo || \ - adv_phy_type_hi & phy_type_mask_hi))) \ + (advert_phy_type_lo & phy_type_mask_lo || \ + advert_phy_type_hi & phy_type_mask_hi))) \ ethtool_link_ksettings_add_link_mode(ks, advertising,\ ethtool_link_mode); \ } while (0) @@ -1493,10 +1462,10 @@ ice_phy_type_to_ethtool(struct net_device *netdev, struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + u64 advert_phy_type_lo = 0; + u64 advert_phy_type_hi = 0; u64 phy_type_mask_lo = 0; u64 phy_type_mask_hi = 0; - u64 adv_phy_type_lo = 0; - u64 adv_phy_type_hi = 0; u64 phy_types_high = 0; u64 phy_types_low = 0; u16 req_speeds; @@ -1514,28 +1483,35 
@@ ice_phy_type_to_ethtool(struct net_device *netdev, * requested by user. */ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { - struct ice_link_default_override_tlv *ldo; - - ldo = &pf->link_dflt_override; phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo); phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi); ice_mask_min_supported_speeds(phy_types_high, &phy_types_low); - - /* If override enabled and PHY mask set, then - * Advertising link mode is the intersection of the PHY - * types without media and the override PHY mask. + /* determine advertised modes based on link override only + * if it's supported and if the FW doesn't abstract the + * driver from having to account for link overrides */ - if (ldo->options & ICE_LINK_OVERRIDE_EN && - (ldo->phy_type_low || ldo->phy_type_high)) { - adv_phy_type_lo = - le64_to_cpu(pf->nvm_phy_type_lo) & - ldo->phy_type_low; - adv_phy_type_hi = - le64_to_cpu(pf->nvm_phy_type_hi) & - ldo->phy_type_high; + if (ice_fw_supports_link_override(&pf->hw) && + !ice_fw_supports_report_dflt_cfg(&pf->hw)) { + struct ice_link_default_override_tlv *ldo; + + ldo = &pf->link_dflt_override; + /* If override enabled and PHY mask set, then + * Advertising link mode is the intersection of the PHY + * types without media and the override PHY mask. + */ + if (ldo->options & ICE_LINK_OVERRIDE_EN && + (ldo->phy_type_low || ldo->phy_type_high)) { + advert_phy_type_lo = + le64_to_cpu(pf->nvm_phy_type_lo) & + ldo->phy_type_low; + advert_phy_type_hi = + le64_to_cpu(pf->nvm_phy_type_hi) & + ldo->phy_type_high; + } } } else { + /* strict mode */ phy_types_low = vsi->port_info->phy.phy_type_low; phy_types_high = vsi->port_info->phy.phy_type_high; } @@ -1543,9 +1519,9 @@ ice_phy_type_to_ethtool(struct net_device *netdev, /* If Advertising link mode PHY type is not using override PHY type, * then use PHY type with media. 
*/ - if (!adv_phy_type_lo && !adv_phy_type_hi) { - adv_phy_type_lo = vsi->port_info->phy.phy_type_low; - adv_phy_type_hi = vsi->port_info->phy.phy_type_high; + if (!advert_phy_type_lo && !advert_phy_type_hi) { + advert_phy_type_lo = vsi->port_info->phy.phy_type_low; + advert_phy_type_hi = vsi->port_info->phy.phy_type_high; } ethtool_link_ksettings_zero_link_mode(ks, supported); @@ -2021,7 +1997,7 @@ ice_get_link_ksettings(struct net_device *netdev, return -ENOMEM; status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_SW_CFG, caps, NULL); + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); if (status) { err = -EIO; goto done; @@ -2058,7 +2034,7 @@ ice_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { err = -EIO; goto done; @@ -2225,13 +2201,14 @@ ice_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ethtool_link_ksettings safe_ks, copy_ks; - struct ice_aqc_get_phy_caps_data *abilities; u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; - u16 adv_link_speed, curr_link_speed, idx; + struct ethtool_link_ksettings copy_ks = *ks; + struct ethtool_link_ksettings safe_ks = {}; + struct ice_aqc_get_phy_caps_data *phy_caps; struct ice_aqc_set_phy_cfg_data config; + u16 adv_link_speed, curr_link_speed; struct ice_pf *pf = np->vsi->back; - struct ice_port_info *p; + struct ice_port_info *pi; u8 autoneg_changed = 0; enum ice_status status; u64 phy_type_high = 0; @@ -2239,46 +2216,37 @@ ice_set_link_ksettings(struct net_device *netdev, int err = 0; bool linkup; - p = np->vsi->port_info; - - if (!p) - return -EOPNOTSUPP; + pi = np->vsi->port_info; - /* Check if this is LAN VSI */ - ice_for_each_vsi(pf, idx) - if (pf->vsi[idx]->type == ICE_VSI_PF) { - if (np->vsi != pf->vsi[idx]) - return -EOPNOTSUPP; - break; - } + if (!pi) + return -EIO; - if (p->phy.media_type != ICE_MEDIA_BASET && - p->phy.media_type != ICE_MEDIA_FIBER && - p->phy.media_type != ICE_MEDIA_BACKPLANE && - p->phy.media_type != ICE_MEDIA_DA && - p->phy.link_info.link_info & ICE_AQ_LINK_UP) + if (pi->phy.media_type != ICE_MEDIA_BASET && + pi->phy.media_type != ICE_MEDIA_FIBER && + pi->phy.media_type != ICE_MEDIA_BACKPLANE && + pi->phy.media_type != ICE_MEDIA_DA && + pi->phy.link_info.link_info & ICE_AQ_LINK_UP) return -EOPNOTSUPP; - abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); - if (!abilities) + phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL); + if (!phy_caps) return -ENOMEM; /* Get the PHY capabilities based on media */ - status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP, - abilities, NULL); + if (ice_fw_supports_report_dflt_cfg(pi->hw)) + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + phy_caps, NULL); + else + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); if (status) { - err = -EAGAIN; + err = -EIO; goto done; } - /* copy the ksettings to copy_ks to avoid modifying the original */ - memcpy(&copy_ks, ks, sizeof(copy_ks)); - /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; - memset(&safe_ks, 0, sizeof(safe_ks)); - /* Get link modes supported by hardware.*/ ice_phy_type_to_ethtool(netdev, &safe_ks); @@ -2290,7 +2258,7 @@ ice_set_link_ksettings(struct net_device *netdev, __ETHTOOL_LINK_MODE_MASK_NBITS)) { if
(!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); - err = -EINVAL; + err = -EOPNOTSUPP; goto done; } @@ -2314,7 +2282,7 @@ ice_set_link_ksettings(struct net_device *netdev, goto done; } - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) { err = -EBUSY; @@ -2327,26 +2295,26 @@ ice_set_link_ksettings(struct net_device *netdev, * configuration is initialized during probe from PHY capabilities * software mode, and updated on set PHY configuration. */ - memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config)); + config = pi->phy.curr_user_phy_cfg; config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Check autoneg */ - err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, + err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed, netdev); if (err) goto done; /* Call to get the current link speed */ - p->phy.get_link_info = true; - status = ice_get_link_status(p, &linkup); + pi->phy.get_link_info = true; + status = ice_get_link_status(pi, &linkup); if (status) { - err = -EAGAIN; + err = -EIO; goto done; } - curr_link_speed = p->phy.link_info.link_speed; + curr_link_speed = pi->phy.link_info.link_speed; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); /* If speed didn't get set, set it to what it currently is. @@ -2365,7 +2333,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* save the requested speeds */ - p->phy.link_info.req_speeds = adv_link_speed; + pi->phy.link_info.req_speeds = adv_link_speed; /* set link and auto negotiation so changes take effect */ config.caps |= ICE_AQ_PHY_ENA_LINK; @@ -2373,7 +2341,7 @@ ice_set_link_ksettings(struct net_device *netdev, /* check if there is a PHY type for the requested advertised speed */ if (!(phy_type_low || phy_type_high)) { netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); - err = -EAGAIN; + err = -EOPNOTSUPP; goto done; } @@ -2381,9 +2349,9 @@ ice_set_link_ksettings(struct net_device *netdev, * for set PHY configuration */ config.phy_type_high = cpu_to_le64(phy_type_high) & - abilities->phy_type_high; + phy_caps->phy_type_high; config.phy_type_low = cpu_to_le64(phy_type_low) & - abilities->phy_type_low; + phy_caps->phy_type_low; if (!(config.phy_type_high || config.phy_type_low)) { /* If there is no intersection and lenient mode is enabled, then @@ -2397,13 +2365,13 @@ ice_set_link_ksettings(struct net_device *netdev, pf->nvm_phy_type_lo; } else { netdev_info(netdev, "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); - err = -EAGAIN; + err = -EOPNOTSUPP; goto done; } } /* If link is up put link down */ - if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { + if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) { /* Tell the OS link is going down, the link will go * back up when fw says it is ready asynchronously */ @@ -2413,18 +2381,18 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); + status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); if (status) { netdev_info(netdev, "Set phy config failed,\n"); - err = -EAGAIN; + err = -EIO; goto done; } /* Save speed request */ - p->phy.curr_user_speed_req = adv_link_speed; + pi->phy.curr_user_speed_req = adv_link_speed; done: - kfree(abilities); - clear_bit(__ICE_CFG_BUSY, pf->state); + kfree(phy_caps); + clear_bit(ICE_CFG_BUSY, pf->state); return err; } @@ -2780,7 +2748,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) if (ice_xsk_any_rx_ring_ena(vsi)) return -EBUSY; - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; @@ -2907,7 +2875,7 @@ process_link: /* Bring interface down, copy in the new ring info, then restore the * interface. if VSI is up, bring it down and then back up */ - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { + if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { ice_down(vsi); if (tx_rings) { @@ -2959,7 +2927,7 @@ free_tx: } done: - clear_bit(__ICE_CFG_BUSY, pf->state); + clear_bit(ICE_CFG_BUSY, pf->state); return err; } @@ -2993,7 +2961,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) goto out; @@ -3060,7 +3028,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) { kfree(pcaps); @@ -3078,7 +3046,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } /* If we have link and don't have autoneg */ - if (!test_bit(__ICE_DOWN, pf->state) && + if (!test_bit(ICE_DOWN, pf->state) && !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) { /* Send message that it might not necessarily work*/ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); @@ -3161,7 +3129,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - int ret = 0, i; + int err, i; u8 *lut; if (hfunc) @@ -3180,17 +3148,20 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) if (!lut) return -ENOMEM; - if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) { - ret = -EIO; + err = ice_get_rss_key(vsi, key); + if (err) + goto out; + + err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size); + if (err) goto out; - } for (i = 0; i < vsi->rss_table_size; i++) indir[i] = (u32)(lut[i]); out: kfree(lut); - return ret; + return err; } /** @@ -3211,7 +3182,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 
*indir, const u8 *key, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct device *dev; - u8 *seed = NULL; + int err; dev = ice_pf_to_dev(pf); if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) @@ -3232,7 +3203,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, return -ENOMEM; } memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE); - seed = vsi->rss_hkey_user; + + err = ice_set_rss_key(vsi, vsi->rss_hkey_user); + if (err) + return err; } if (!vsi->rss_lut_user) { @@ -3253,8 +3227,9 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, vsi->rss_size); } - if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size)) - return -EIO; + err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size); + if (err) + return err; return 0; } @@ -3350,10 +3325,9 @@ static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size) static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; struct ice_hw *hw; - int err = 0; + int err; u8 *lut; dev = ice_pf_to_dev(pf); @@ -3374,14 +3348,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) /* create/set RSS LUT */ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); - status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, - vsi->rss_table_size); - if (status) { - dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", - ice_stat_str(status), + err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); + if (err) + dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err, ice_aq_str(hw->adminq.sq_last_status)); - err = -EIO; - } kfree(lut); return err; @@ -3540,13 +3510,13 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type, switch (c_type) { case ICE_RX_CONTAINER: - ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); - ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc); + ec->rx_coalesce_usecs = rc->itr_setting; ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl; break; case ICE_TX_CONTAINER: - ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); - ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc); + ec->tx_coalesce_usecs = rc->itr_setting; break; default: dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type); @@ -3664,11 +3634,16 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, ICE_MAX_INTRL); return -EINVAL; } + if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl && + (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) { + netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n", + c_type_str); + return -EINVAL; + } if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; - wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), - ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high, - pf->hw.intrl_gran)); + ice_write_intrl(rc->ring->q_vector, + ec->rx_coalesce_usecs_high); } use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; @@ -3686,7 +3661,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, return -EINVAL; } - itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; + itr_setting = rc->itr_setting; if (coalesce_usecs != itr_setting && 
use_adaptive_coalesce) { netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", c_type_str, c_type_str); @@ -3700,12 +3675,18 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, } if (use_adaptive_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; + rc->itr_mode = ITR_DYNAMIC; } else { - /* save the user set usecs */ + rc->itr_mode = ITR_STATIC; + /* store user facing value how it was set */ rc->itr_setting = coalesce_usecs; - /* device ITR granularity is in 2 usec increments */ - rc->target_itr = ITR_REG_ALIGN(rc->itr_setting); + /* write the change to the register */ + ice_write_itr(rc, coalesce_usecs); + /* force writes to take effect immediately, the flush shouldn't + * be done in the functions above because the intent is for + * them to do lazy writes. + */ + ice_flush(&pf->hw); } return 0; @@ -3767,8 +3748,6 @@ ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting, if (use_adaptive_coalesce) return; - itr_setting = ITR_TO_REG(itr_setting); - if (itr_setting != coalesce_usecs && (coalesce_usecs % 2)) netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n", c_type_str, coalesce_usecs, c_type_str, @@ -3823,7 +3802,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, return -EINVAL; set_complete: - return 0; } @@ -3936,30 +3914,33 @@ ice_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); +#define SFF_READ_BLOCK_SIZE 8 + u8 value[SFF_READ_BLOCK_SIZE] = { 0 }; u8 addr = ICE_I2C_EEPROM_DEV_ADDR; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; bool is_sfp = false; - unsigned int i; + unsigned int i, j; u16 offset = 0; - u8 value = 0; u8 page = 0; - status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, - &value, 1, 0, NULL); - if (status) - return -EIO; - if (!ee || !ee->len || !data) return -EINVAL; - if (value == ICE_MODULE_TYPE_SFP) + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, + NULL); + if (status) + return -EIO; + + if (value[0] == ICE_MODULE_TYPE_SFP) is_sfp = true; - for (i = 0; i < ee->len; i++) { + memset(data, 0, ee->len); + for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) { offset = i + ee->offset; + page = 0; /* Check if we need to access the other memory page */ if (is_sfp) { @@ -3975,11 +3956,37 @@ ice_get_module_eeprom(struct net_device *netdev, } } - status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp, - &value, 1, 0, NULL); - if (status) - value = 0; - data[i] = value; + /* Bit 2 of EEPROM address 0x02 declares upper + * pages are disabled on QSFP modules. + * SFP modules only ever use page 0. + */ + if (page == 0 || !(data[0x2] & 0x4)) { + /* If i2c bus is busy due to slow page change or + * link management access, call can fail. This is normal. + * So we retry this a few times. 
+ */ + for (j = 0; j < 4; j++) { + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, + !is_sfp, value, + SFF_READ_BLOCK_SIZE, + 0, NULL); + netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n", + addr, offset, page, is_sfp, + value[0], value[1], value[2], value[3], + value[4], value[5], value[6], value[7], + status); + if (status) { + usleep_range(1500, 2500); + memset(value, 0, SFF_READ_BLOCK_SIZE); + continue; + } + break; + } + + /* Make sure we have enough room for the new block */ + if ((i + SFF_READ_BLOCK_SIZE) < ee->len) + memcpy(data + i, value, SFF_READ_BLOCK_SIZE); + } } return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 192729546bbf..16de603b280c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1452,7 +1452,7 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) return -EBUSY; } - if (test_bit(__ICE_FD_FLUSH_REQ, pf->state)) + if (test_bit(ICE_FD_FLUSH_REQ, pf->state)) return -EBUSY; mutex_lock(&hw->fdir_fltr_lock); @@ -1679,6 +1679,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) input->flex_offset = userdata.flex_offset; } + input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE; + input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; + /* input struct is added to the HW filter list */ ice_fdir_update_list_entry(pf, input, fsp->location); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 59c0c6a0f8c5..59ef68f072c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -40,6 +40,204 @@ static const u8 ice_fdir_ipv4_pkt[] = { 0x00, 0x00 }; +static const u8 ice_fdir_udp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x58, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_icmp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x44, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_l2tpv3_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x73, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_l2tpv3_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x73, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x32, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 +}; + +static const u8 ice_fdir_ipv6_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x32, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_ah_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x33, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 +}; + +static const u8 ice_fdir_ipv6_ah_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x33, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_nat_t_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_nat_t_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x94, 0x00, 0x00, 0x00, 0x08, +}; + +static const u8 ice_fdir_ipv4_pfcp_node_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_pfcp_session_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00, + 0x00, 0x00, 0x21, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_pfcp_node_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65, + 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_pfcp_session_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65, + 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_non_ip_l2_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + static const u8 ice_fdir_tcpv6_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, @@ -239,6 +437,111 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, }, { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_pkt), + ice_fdir_udp4_gtpu4_pkt, + sizeof(ice_fdir_udp4_gtpu4_pkt), + ice_fdir_udp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_pkt), + ice_fdir_tcp4_gtpu4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_pkt), + ice_fdir_tcp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, + sizeof(ice_fdir_icmp4_gtpu4_pkt), + ice_fdir_icmp4_gtpu4_pkt, + sizeof(ice_fdir_icmp4_gtpu4_pkt), + ice_fdir_icmp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, + sizeof(ice_fdir_ipv4_gtpu4_pkt), + ice_fdir_ipv4_gtpu4_pkt, + 
sizeof(ice_fdir_ipv4_gtpu4_pkt), + ice_fdir_ipv4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3, + sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt, + sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3, + sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt, + sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_ESP, + sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt, + sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_ESP, + sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt, + sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_AH, + sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt, + sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_AH, + sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt, + sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP, + sizeof(ice_fdir_ipv4_nat_t_esp_pkt), + ice_fdir_ipv4_nat_t_esp_pkt, + sizeof(ice_fdir_ipv4_nat_t_esp_pkt), + ice_fdir_ipv4_nat_t_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP, + sizeof(ice_fdir_ipv6_nat_t_esp_pkt), + ice_fdir_ipv6_nat_t_esp_pkt, + sizeof(ice_fdir_ipv6_nat_t_esp_pkt), + ice_fdir_ipv6_nat_t_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE, + sizeof(ice_fdir_ipv4_pfcp_node_pkt), + ice_fdir_ipv4_pfcp_node_pkt, + sizeof(ice_fdir_ipv4_pfcp_node_pkt), + ice_fdir_ipv4_pfcp_node_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION, + sizeof(ice_fdir_ipv4_pfcp_session_pkt), + ice_fdir_ipv4_pfcp_session_pkt, + sizeof(ice_fdir_ipv4_pfcp_session_pkt), + ice_fdir_ipv4_pfcp_session_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE, + sizeof(ice_fdir_ipv6_pfcp_node_pkt), + ice_fdir_ipv6_pfcp_node_pkt, + sizeof(ice_fdir_ipv6_pfcp_node_pkt), + ice_fdir_ipv6_pfcp_node_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION, + sizeof(ice_fdir_ipv6_pfcp_session_pkt), + ice_fdir_ipv6_pfcp_session_pkt, + sizeof(ice_fdir_ipv6_pfcp_session_pkt), + ice_fdir_ipv6_pfcp_session_pkt, + }, + { + ICE_FLTR_PTYPE_NON_IP_L2, + sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt, + sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt, + }, + { ICE_FLTR_PTYPE_NONF_IPV6_TCP, sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt, sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt, @@ -374,21 +677,31 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) { fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES; fdir_fltr_ctx.qindex = 0; + } else if (input->dest_ctl == + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) { + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; + fdir_fltr_ctx.qindex = 0; } else { + if (input->dest_ctl == + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP) + fdir_fltr_ctx.toq = input->q_region; fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; fdir_fltr_ctx.qindex = input->q_index; } - fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + fdir_fltr_ctx.cnt_ena = input->cnt_ena; fdir_fltr_ctx.cnt_index = input->cnt_index; fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi); fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE; - fdir_fltr_ctx.toq_prio = 3; + if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) + fdir_fltr_ctx.toq_prio = 0; + else + fdir_fltr_ctx.toq_prio = 3; fdir_fltr_ctx.pcmd = add ? 
ICE_FXD_FLTR_QW1_PCMD_ADD : ICE_FXD_FLTR_QW1_PCMD_REMOVE; fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET; fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; - fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; - fdir_fltr_ctx.fdid_prio = 3; + fdir_fltr_ctx.comp_report = input->comp_report; + fdir_fltr_ctx.fdid_prio = input->fdid_prio; fdir_fltr_ctx.desc_prof = 1; fdir_fltr_ctx.desc_prof_prio = 3; ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc); @@ -471,6 +784,55 @@ static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr) } /** + * ice_pkt_insert_u6_qfi - insert a u6 value QFI into a memory buffer for GTPU + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + * + * This function is designed for inserting QFI (6 bits) for GTPU. + */ +static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data) +{ + u8 ret; + + ret = (data & 0x3F) + (*(pkt + offset) & 0xC0); + memcpy(pkt + offset, &ret, sizeof(ret)); +} + +/** + * ice_pkt_insert_u8 - insert a u8 value into a memory buffer. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +/** + * ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for IPv6 TC. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + * + * This function is designed for inserting the Traffic Class (TC) for IPv6, + * since the TC is not byte aligned. Here we split it out + * into two parts, fill each byte with data copied from pkt, then insert + * the two bytes one by one. + */ +static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data) +{ + u8 high, low; + + high = (data >> 4) + (*(pkt + offset) & 0xF0); + memcpy(pkt + offset, &high, sizeof(high)); + + low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4); + memcpy(pkt + offset + 1, &low, sizeof(low)); +} + +/** * ice_pkt_insert_u16 - insert a be16 value into a memory buffer * @pkt: packet buffer * @offset: offset into buffer @@ -493,6 +855,16 @@ static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data) } /** + * ice_pkt_insert_mac_addr - insert a MAC addr into a memory buffer.
+ * @pkt: packet buffer + * @addr: MAC address to convert and insert into pkt at offset + */ +static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) +{ + ether_addr_copy(pkt, addr); +} + +/** * ice_fdir_get_gen_prgm_pkt - generate a training packet * @hw: pointer to the hardware structure * @input: flow director filter data structure @@ -520,11 +892,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case IPPROTO_SCTP: flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; break; - case IPPROTO_IP: + default: flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; break; - default: - return ICE_ERR_PARAM; } } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { switch (input->ip.v6.proto) { @@ -537,11 +907,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case IPPROTO_SCTP: flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; break; - case IPPROTO_IP: + default: flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; break; - default: - return ICE_ERR_PARAM; } } else { flow = input->flow_type; @@ -580,6 +948,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v4.dst_ip); ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET, input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); if (frag) loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; break; @@ -592,6 +963,11 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v4.dst_ip); ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET, input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data.src_mac); break; case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, @@ -602,13 +978,87 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v4.dst_ip); ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET, input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, input->ip.v4.src_ip); ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, input->ip.v4.dst_ip); - ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET, + input->ip.v4.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_GTPU_TEID_OFFSET, + input->gtpu_data.teid); + ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET, + input->gtpu_data.qfi); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3: + ice_pkt_insert_u32(loc, ICE_IPV4_L2TPV3_SESS_ID_OFFSET, + input->l2tpv3_data.session_id); + break; + case 
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3: + ice_pkt_insert_u32(loc, ICE_IPV6_L2TPV3_SESS_ID_OFFSET, + input->l2tpv3_data.session_id); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_ESP: + ice_pkt_insert_u32(loc, ICE_IPV4_ESP_SPI_OFFSET, + input->ip.v4.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_ESP: + ice_pkt_insert_u32(loc, ICE_IPV6_ESP_SPI_OFFSET, + input->ip.v6.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_AH: + ice_pkt_insert_u32(loc, ICE_IPV4_AH_SPI_OFFSET, + input->ip.v4.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_AH: + ice_pkt_insert_u32(loc, ICE_IPV6_AH_SPI_OFFSET, + input->ip.v6.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_NAT_T_ESP_SPI_OFFSET, + input->ip.v4.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV6_NAT_T_ESP_SPI_OFFSET, + input->ip.v6.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE: + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION: + ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE: + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION: + ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET, + input->ip.v6.dst_port); + break; + case ICE_FLTR_PTYPE_NON_IP_L2: + ice_pkt_insert_u16(loc, ICE_MAC_ETHTYPE_OFFSET, + input->ext_data.ether_type); break; case ICE_FLTR_PTYPE_NONF_IPV6_TCP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, @@ -619,6 +1069,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.src_port); ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET, input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV6_UDP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, @@ -629,6 +1082,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.src_port); ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET, input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, @@ -639,12 +1095,20 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.src_port); ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET, input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, input->ip.v6.src_ip); ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, input->ip.v6.dst_ip); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET, + 
input->ip.v6.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; default: return ICE_ERR_PARAM; @@ -671,7 +1135,7 @@ bool ice_fdir_has_frag(enum ice_fltr_ptype flow) } /** - * ice_fdir_find_by_idx - find filter with idx + * ice_fdir_find_fltr_by_idx - find filter with idx * @hw: pointer to hardware structure * @fltr_idx: index to find. * diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index 1c587766daab..d2d40e18ae8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -25,6 +25,25 @@ #define ICE_IPV6_UDP_DST_PORT_OFFSET 56 #define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54 #define ICE_IPV6_SCTP_DST_PORT_OFFSET 56 +#define ICE_MAC_ETHTYPE_OFFSET 12 +#define ICE_IPV4_TOS_OFFSET 15 +#define ICE_IPV4_TTL_OFFSET 22 +#define ICE_IPV6_TC_OFFSET 14 +#define ICE_IPV6_HLIM_OFFSET 21 +#define ICE_IPV6_PROTO_OFFSET 20 +#define ICE_IPV4_GTPU_TEID_OFFSET 46 +#define ICE_IPV4_GTPU_QFI_OFFSET 56 +#define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34 +#define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54 +#define ICE_IPV4_ESP_SPI_OFFSET 34 +#define ICE_IPV6_ESP_SPI_OFFSET 54 +#define ICE_IPV4_AH_SPI_OFFSET 38 +#define ICE_IPV6_AH_SPI_OFFSET 58 +#define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42 +#define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62 + +#define ICE_FDIR_MAX_FLTRS 16384 + /* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF * requests that the packet not be fragmented. MF indicates that a packet has * been fragmented. @@ -34,6 +53,8 @@ enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER, }; enum ice_fltr_prgm_desc_fd_status { @@ -86,6 +107,7 @@ struct ice_fdir_v4 { u8 tos; u8 ip_ver; u8 proto; + u8 ttl; }; #define ICE_IPV6_ADDR_LEN_AS_U32 4 @@ -99,10 +121,35 @@ struct ice_fdir_v6 { __be32 sec_parm_idx; /* security parameter index */ u8 tc; u8 proto; + u8 hlim; +}; + +struct ice_fdir_udp_gtp { + u8 flags; + u8 msg_type; + __be16 rsrvd_len; + __be32 teid; + __be16 rsrvd_seq_nbr; + u8 rsrvd_n_pdu_nbr; + u8 rsrvd_next_ext_type; + u8 rsvrd_ext_len; + u8 pdu_type:4, + spare:4; + u8 ppp:1, + rqi:1, + qfi:6; + u32 rsvrd; + u8 next_ext; +}; + +struct ice_fdir_l2tpv3 { + __be32 session_id; }; struct ice_fdir_extra { u8 dst_mac[ETH_ALEN]; /* dest MAC address */ + u8 src_mac[ETH_ALEN]; /* src MAC address */ + __be16 ether_type; /* for NON_IP_L2 */ u32 usr_def[2]; /* user data */ __be16 vlan_type; /* VLAN ethertype */ __be16 vlan_tag; /* VLAN tag info */ @@ -117,11 +164,19 @@ struct ice_fdir_fltr { struct ice_fdir_v6 v6; } ip, mask; + struct ice_fdir_udp_gtp gtpu_data; + struct ice_fdir_udp_gtp gtpu_mask; + + struct ice_fdir_l2tpv3 l2tpv3_data; + struct ice_fdir_l2tpv3 l2tpv3_mask; + struct ice_fdir_extra ext_data; struct ice_fdir_extra ext_mask; /* flex byte filter data */ __be16 flex_word; + /* queue region size (=2^q_region) */ + u8 q_region; u16 flex_offset; u16 flex_fltr; @@ -129,9 +184,12 @@ struct ice_fdir_fltr { u16 q_index; u16 dest_vsi; u8 dest_ctl; + u8 cnt_ena; u8 fltr_status; u16 cnt_index; u32 fltr_id; + u8 fdid_prio; + u8 comp_report; }; /* Dummy packet filter definition structure */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 5e1fd30c0a0f..06ac9badee77 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -334,6 +334,7 @@ 
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_BST_TCAMS_IN_BUF) return NULL; @@ -404,6 +405,7 @@ ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, if (!section) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_LABELS_IN_BUF) return NULL; @@ -1063,32 +1065,36 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) static enum ice_status ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { - struct ice_global_metadata_seg *meta_seg; struct ice_generic_seg_hdr *seg_hdr; if (!pkg_hdr) return ICE_ERR_PARAM; - meta_seg = (struct ice_global_metadata_seg *) - ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); - if (meta_seg) { - hw->pkg_ver = meta_seg->pkg_ver; - memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name)); + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + /* Get package information from the Metadata Section */ + meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); + return ICE_ERR_CFG; + } + + hw->pkg_ver = meta->ver; + memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, - meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, - meta_seg->pkg_name); - } else { - ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n"); - return ICE_ERR_CFG; - } + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); - seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); - if (seg_hdr) { - hw->ice_pkg_ver = seg_hdr->seg_format_ver; - memcpy(hw->ice_pkg_name, seg_hdr->seg_id, - sizeof(hw->ice_pkg_name)); + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + memcpy(hw->ice_seg_id, seg_hdr->seg_id, + sizeof(hw->ice_seg_id)); ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", seg_hdr->seg_format_ver.major, @@ -2063,6 +2069,7 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) count++; list_for_each_entry(tmp2, list2, list) chk_count++; + /* cppcheck-suppress knownConditionTrueFalse */ if (!count || count != chk_count) return false; @@ -2361,18 +2368,82 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) } /** - * ice_find_prof_id - find profile ID for a given field vector + * ice_prof_has_mask_idx - determine if profile index masking is identical + * @hw: pointer to the hardware structure + * @blk: HW block + * @prof: profile to check + * @idx: profile index to check + * @mask: mask to match + */ +static bool +ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx, + u16 mask) +{ + bool expect_no_mask = false; + bool found = false; + bool match = false; + u16 i; + + /* If mask is 0x0000 or 0xffff, then there is no masking */ + if (mask == 0 || mask == 0xffff) + expect_no_mask = true; + + /* Scan the enabled masks on this profile, for the specified idx */ + for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first + + hw->blk[blk].masks.count; i++) + if (hw->blk[blk].es.mask_ena[prof] & BIT(i)) + if (hw->blk[blk].masks.masks[i].in_use && + hw->blk[blk].masks.masks[i].idx == idx) 
{ + found = true; + if (hw->blk[blk].masks.masks[i].mask == mask) + match = true; + break; + } + + if (expect_no_mask) { + if (found) + return false; + } else { + if (!match) + return false; + } + + return true; +} + +/** + * ice_prof_has_mask - determine if profile masking is identical + * @hw: pointer to the hardware structure + * @blk: HW block + * @prof: profile to check + * @masks: masks to match + */ +static bool +ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) +{ + u16 i; + + /* es->mask_ena[prof] will have the mask */ + for (i = 0; i < hw->blk[blk].es.fvw; i++) + if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i])) + return false; + + return true; +} + +/** + * ice_find_prof_id_with_mask - find profile ID for a given field vector * @hw: pointer to the hardware structure * @blk: HW block * @fv: field vector to search for + * @masks: masks for FV * @prof_id: receives the profile ID */ static enum ice_status -ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, - struct ice_fv_word *fv, u8 *prof_id) +ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, + struct ice_fv_word *fv, u16 *masks, u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es; - u16 off; u8 i; /* For FD, we don't want to re-use a existed profile with the same @@ -2382,11 +2453,15 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, return ICE_ERR_DOES_NOT_EXIST; for (i = 0; i < (u8)es->count; i++) { - off = i * es->fvw; + u16 off = i * es->fvw; if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) continue; + /* check if masks settings are the same for this profile */ + if (masks && !ice_prof_has_mask(hw, blk, i, masks)) + continue; + *prof_id = i; return 0; } @@ -2438,20 +2513,22 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * ice_alloc_tcam_ent - allocate hardware TCAM entry * @hw: pointer to the HW struct * @blk: the block to allocate the TCAM for + * @btm: true to allocate from bottom of table, false to allocate from top * @tcam_idx: pointer to variable to receive the TCAM entry * * This function allocates a new entry in a Profile ID TCAM for a specific * block. 
*/ static enum ice_status -ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx) +ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, + u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; - return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx); + return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } /** @@ -2537,6 +2614,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) } /** + * ice_write_prof_mask_reg - write profile mask register + * @hw: pointer to the HW struct + * @blk: hardware block + * @mask_idx: mask index + * @idx: index of the FV which will use the mask + * @mask: the 16-bit mask + */ +static void +ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, + u16 idx, u16 mask) +{ + u32 offset; + u32 val; + + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK(mask_idx); + val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; + val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK(mask_idx); + val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M; + val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", + blk); + return; + } + + wr32(hw, offset, val); + ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n", + blk, idx, offset, val); +} + +/** + * ice_write_prof_mask_enable_res - write profile mask enable register + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + * @enable_mask: enable mask + */ +static void +ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk, + u16 prof_id, u32 enable_mask) +{ + u32 offset; + + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK_SEL(prof_id); + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK_SEL(prof_id); + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", + blk); + return; + } + + wr32(hw, offset, enable_mask); + ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n", + blk, prof_id, offset, enable_mask); +} + +/** + * ice_init_prof_masks - initial prof masks + * @hw: pointer to the HW struct + * @blk: hardware block + */ +static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) +{ + u16 per_pf; + u16 i; + + mutex_init(&hw->blk[blk].masks.lock); + + per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; + + hw->blk[blk].masks.count = per_pf; + hw->blk[blk].masks.first = hw->pf_id * per_pf; + + memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) + ice_write_prof_mask_reg(hw, blk, i, 0, 0); +} + +/** + * ice_init_all_prof_masks - initialize all prof masks + * @hw: pointer to the HW struct + */ +static void ice_init_all_prof_masks(struct ice_hw *hw) +{ + ice_init_prof_masks(hw, ICE_BLK_RSS); + ice_init_prof_masks(hw, ICE_BLK_FD); +} + +/** + * ice_alloc_prof_mask - allocate profile mask + * @hw: pointer to the HW struct + * @blk: hardware block + * @idx: index of FV which will use the mask + * @mask: the 16-bit mask + * @mask_idx: variable to receive the mask index + */ +static enum ice_status +ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, + u16 *mask_idx) +{ + bool found_unused = false, found_copy = false; + enum ice_status status = ICE_ERR_MAX_LIMIT; + u16 unused_idx = 0, 
copy_idx = 0; + u16 i; + + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + mutex_lock(&hw->blk[blk].masks.lock); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) + if (hw->blk[blk].masks.masks[i].in_use) { + /* if mask is in use and it exactly duplicates the + * desired mask and index, then it can be reused + */ + if (hw->blk[blk].masks.masks[i].mask == mask && + hw->blk[blk].masks.masks[i].idx == idx) { + found_copy = true; + copy_idx = i; + break; + } + } else { + /* save off unused index, but keep searching in case + * there is an exact match later on + */ + if (!found_unused) { + found_unused = true; + unused_idx = i; + } + } + + if (found_copy) + i = copy_idx; + else if (found_unused) + i = unused_idx; + else + goto err_ice_alloc_prof_mask; + + /* update mask for a new entry */ + if (found_unused) { + hw->blk[blk].masks.masks[i].in_use = true; + hw->blk[blk].masks.masks[i].mask = mask; + hw->blk[blk].masks.masks[i].idx = idx; + hw->blk[blk].masks.masks[i].ref = 0; + ice_write_prof_mask_reg(hw, blk, i, idx, mask); + } + + hw->blk[blk].masks.masks[i].ref++; + *mask_idx = i; + status = 0; + +err_ice_alloc_prof_mask: + mutex_unlock(&hw->blk[blk].masks.lock); + + return status; +} + +/** + * ice_free_prof_mask - free profile mask + * @hw: pointer to the HW struct + * @blk: hardware block + * @mask_idx: index of mask + */ +static enum ice_status +ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) +{ + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + if (!(mask_idx >= hw->blk[blk].masks.first && + mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) + return ICE_ERR_DOES_NOT_EXIST; + + mutex_lock(&hw->blk[blk].masks.lock); + + if (!hw->blk[blk].masks.masks[mask_idx].in_use) + goto exit_ice_free_prof_mask; + + if (hw->blk[blk].masks.masks[mask_idx].ref > 1) { + hw->blk[blk].masks.masks[mask_idx].ref--; + goto exit_ice_free_prof_mask; + } + + /* remove mask */ + hw->blk[blk].masks.masks[mask_idx].in_use = false; + hw->blk[blk].masks.masks[mask_idx].mask = 0; + hw->blk[blk].masks.masks[mask_idx].idx = 0; + + /* update mask as unused entry */ + ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk, + mask_idx); + ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0); + +exit_ice_free_prof_mask: + mutex_unlock(&hw->blk[blk].masks.lock); + + return 0; +} + +/** + * ice_free_prof_masks - free all profile masks for a profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + */ +static enum ice_status +ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) +{ + u32 mask_bm; + u16 i; + + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + mask_bm = hw->blk[blk].es.mask_ena[prof_id]; + for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) + if (mask_bm & BIT(i)) + ice_free_prof_mask(hw, blk, i); + + return 0; +} + +/** + * ice_shutdown_prof_masks - releases lock for masking + * @hw: pointer to the HW struct + * @blk: hardware block + * + * This should be called before unloading the driver + */ +static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk) +{ + u16 i; + + mutex_lock(&hw->blk[blk].masks.lock); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) { + ice_write_prof_mask_reg(hw, blk, i, 0, 0); + + hw->blk[blk].masks.masks[i].in_use = false; + hw->blk[blk].masks.masks[i].idx = 0; + hw->blk[blk].masks.masks[i].mask = 0;
+ } + + mutex_unlock(&hw->blk[blk].masks.lock); + mutex_destroy(&hw->blk[blk].masks.lock); +} + +/** + * ice_shutdown_all_prof_masks - releases all locks for masking + * @hw: pointer to the HW struct + * + * This should be called before unloading the driver + */ +static void ice_shutdown_all_prof_masks(struct ice_hw *hw) +{ + ice_shutdown_prof_masks(hw, ICE_BLK_RSS); + ice_shutdown_prof_masks(hw, ICE_BLK_FD); +} + +/** + * ice_update_prof_masking - set registers according to masking + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + * @masks: masks + */ +static enum ice_status +ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, + u16 *masks) +{ + bool err = false; + u32 ena_mask = 0; + u16 idx; + u16 i; + + /* Only support FD and RSS masking, otherwise nothing to be done */ + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return 0; + + for (i = 0; i < hw->blk[blk].es.fvw; i++) + if (masks[i] && masks[i] != 0xFFFF) { + if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) { + ena_mask |= BIT(idx); + } else { + /* not enough bitmaps */ + err = true; + break; + } + } + + if (err) { + /* free any bitmaps we have allocated */ + for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++) + if (ena_mask & BIT(i)) + ice_free_prof_mask(hw, blk, i); + + return ICE_ERR_OUT_OF_RANGE; + } + + /* enable the masks for this profile */ + ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask); + + /* store enabled masks with profile so that they can be freed later */ + hw->blk[blk].es.mask_ena[prof_id] = ena_mask; + + return 0; +} + +/** * ice_write_es - write an extraction sequence to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence @@ -2575,6 +2976,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { ice_write_es(hw, blk, prof_id, NULL); + ice_free_prof_masks(hw, blk, prof_id); return ice_free_prof_id(hw, blk, prof_id); } } @@ -2937,6 +3339,7 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena); } list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { @@ -2944,6 +3347,7 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), r); } mutex_destroy(&hw->rss_locks); + ice_shutdown_all_prof_masks(hw); memset(hw->blk, 0, sizeof(hw->blk)); } @@ -2997,6 +3401,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); memset(es->written, 0, es->count * sizeof(*es->written)); + memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); } } @@ -3010,6 +3415,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) mutex_init(&hw->rss_locks); INIT_LIST_HEAD(&hw->rss_list_head); + ice_init_all_prof_masks(hw); for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; struct ice_prof_tcam *prof = &hw->blk[i].prof; @@ -3112,6 +3518,11 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) sizeof(*es->written), GFP_KERNEL); if (!es->written) goto err; + + es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->mask_ena), GFP_KERNEL); + if (!es->mask_ena) + goto err; } return 0; @@ -3711,22 +4122,79 @@ 
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) return 0; } +/* The entries here need to match the order of enum ice_ptype_attrib */ +static const struct ice_ptype_attrib_info ice_ptype_attributes[] = { + { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK }, + { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK }, + { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK }, + { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK }, +}; + +/** + * ice_get_ptype_attrib_info - get PTYPE attribute information + * @type: attribute type + * @info: pointer to variable to receive the attribute information + */ +static void +ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, + struct ice_ptype_attrib_info *info) +{ + *info = ice_ptype_attributes[type]; +} + +/** + * ice_add_prof_attrib - add any PTG with attributes to profile + * @prof: pointer to the profile to which PTG entries will be added + * @ptg: PTG to be added + * @ptype: PTYPE that needs to be looked up + * @attr: array of attributes that will be considered + * @attr_cnt: number of elements in the attribute array + */ +static enum ice_status +ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, + const struct ice_ptype_attributes *attr, u16 attr_cnt) +{ + bool found = false; + u16 i; + + for (i = 0; i < attr_cnt; i++) + if (attr[i].ptype == ptype) { + found = true; + + prof->ptg[prof->ptg_cnt] = ptg; + ice_get_ptype_attrib_info(attr[i].attrib, + &prof->attr[prof->ptg_cnt]); + + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + return ICE_ERR_MAX_LIMIT; + } + + if (!found) + return ICE_ERR_DOES_NOT_EXIST; + + return 0; +} + /** * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) + * @attr: array of attributes + * @attr_cnt: number of elements in attr array * @es: extraction sequence (length of array is determined by the block) + * @masks: mask for extraction sequence * - * This function registers a profile, which matches a set of PTGs with a + * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. While the hardware profile is allocated * it will not be written until the first call to ice_add_flow that specifies * the ID value used here.
*/ enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es) + const struct ice_ptype_attributes *attr, u16 attr_cnt, + struct ice_fv_word *es, u16 *masks) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -3740,7 +4208,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], mutex_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ - status = ice_find_prof_id(hw, blk, es, &prof_id); + status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); @@ -3758,6 +4226,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], if (status) goto err_ice_add_prof; } + status = ice_update_prof_masking(hw, blk, prof_id, masks); + if (status) + goto err_ice_add_prof; /* and write new es */ ice_write_es(hw, blk, prof_id, es); @@ -3792,7 +4263,6 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], BITS_PER_BYTE) { u16 ptype; u8 ptg; - u8 m; ptype = byte * BITS_PER_BYTE + bit; @@ -3807,15 +4277,25 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], continue; set_bit(ptg, ptgs_used); - prof->ptg[prof->ptg_cnt] = ptg; - - if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + /* Check to see if there are any attributes for + * this PTYPE, and add them if found. + */ + status = ice_add_prof_attrib(prof, ptg, ptype, + attr, attr_cnt); + if (status == ICE_ERR_MAX_LIMIT) break; + if (status) { + /* This is simply a PTYPE/PTG with no + * attributes + */ + prof->ptg[prof->ptg_cnt] = ptg; + prof->attr[prof->ptg_cnt].flags = 0; + prof->attr[prof->ptg_cnt].mask = 0; - /* nothing left in byte, then exit */ - m = ~(u8)((1 << (bit + 1)) - 1); - if (!(ptypes[byte] & m)) - break; + if (++prof->ptg_cnt >= + ICE_MAX_PTG_PER_PROFILE) + break; + } } bytes--; @@ -4326,7 +4806,12 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, } /* for re-enabling, reallocate a TCAM */ - status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx); + /* for entries with empty attribute masks, allocate entry from + * the bottom of the TCAM table; otherwise, allocate from the + * top of the table in order to give it higher priority + */ + status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0, + &tcam->tcam_idx); if (status) return status; @@ -4336,8 +4821,8 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, return ICE_ERR_NO_MEMORY; status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, - tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, - nm_msk); + tcam->ptg, vsig, 0, tcam->attr.flags, + vl_msk, dc_msk, nm_msk); if (status) goto err_ice_prof_tcam_ena_dis; @@ -4485,7 +4970,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, } /* allocate the TCAM entry index */ - status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); + /* for entries with empty attribute masks, allocate entry from + * the bottom of the TCAM table; otherwise, allocate from the + * top of the table in order to give it higher priority + */ + status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0, + &tcam_idx); if (status) { devm_kfree(ice_hw_to_dev(hw), p); goto err_ice_add_prof_id_vsig; @@ -4494,6 +4984,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, t->tcam[i].ptg = map->ptg[i]; t->tcam[i].prof_id = map->prof_id; t->tcam[i].tcam_idx = tcam_idx; +
t->tcam[i].attr = map->attr[i]; t->tcam[i].in_use = true; p->type = ICE_TCAM_ADD; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 20deddb807c5..8a58e79729b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -27,7 +27,8 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es); + const struct ice_ptype_attributes *attr, u16 attr_cnt, + struct ice_fv_word *es, u16 *masks); enum ice_status ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 24063c1351b2..7d8b517a63c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -109,6 +109,7 @@ struct ice_buf_hdr { (ent_sz)) /* ice package section IDs */ +#define ICE_SID_METADATA 1 #define ICE_SID_XLT0_SW 10 #define ICE_SID_XLT_KEY_BUILDER_SW 11 #define ICE_SID_XLT1_SW 12 @@ -117,6 +118,14 @@ struct ice_buf_hdr { #define ICE_SID_PROFID_REDIR_SW 15 #define ICE_SID_FLD_VEC_SW 16 #define ICE_SID_CDID_KEY_BUILDER_SW 17 + +struct ice_meta_sect { + struct ice_pkg_ver ver; +#define ICE_META_SECT_NAME_SIZE 28 + char name[ICE_META_SECT_NAME_SIZE]; + __le32 track_id; +}; + #define ICE_SID_CDID_REDIR_SW 18 #define ICE_SID_XLT0_ACL 20 @@ -190,6 +199,64 @@ enum ice_sect { ICE_SECT_COUNT }; +#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331 +#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332 +#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333 +#define ICE_MAC_IPV4_GTPU_IPV4_TCP 334 +#define ICE_MAC_IPV4_GTPU_IPV4_ICMP 335 +#define ICE_MAC_IPV6_GTPU_IPV4_FRAG 336 +#define ICE_MAC_IPV6_GTPU_IPV4_PAY 337 +#define ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY 338 +#define ICE_MAC_IPV6_GTPU_IPV4_TCP 339 +#define ICE_MAC_IPV6_GTPU_IPV4_ICMP 340 +#define ICE_MAC_IPV4_GTPU_IPV6_FRAG 341 +#define ICE_MAC_IPV4_GTPU_IPV6_PAY 342 +#define ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY 343 +#define ICE_MAC_IPV4_GTPU_IPV6_TCP 344 +#define ICE_MAC_IPV4_GTPU_IPV6_ICMPV6 345 +#define ICE_MAC_IPV6_GTPU_IPV6_FRAG 346 +#define ICE_MAC_IPV6_GTPU_IPV6_PAY 347 +#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348 +#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349 +#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350 + +/* Attributes that can modify PTYPE definitions. + * + * These values will represent special attributes for PTYPEs, which will + * resolve into metadata packet flags definitions that can be used in the TCAM + * for identifying a PTYPE with specific characteristics. 
+ */ +enum ice_ptype_attrib_type { + /* GTP PTYPEs */ + ICE_PTYPE_ATTR_GTP_PDU_EH, + ICE_PTYPE_ATTR_GTP_SESSION, + ICE_PTYPE_ATTR_GTP_DOWNLINK, + ICE_PTYPE_ATTR_GTP_UPLINK, +}; + +struct ice_ptype_attrib_info { + u16 flags; + u16 mask; +}; + +/* TCAM flag definitions */ +#define ICE_GTP_PDU BIT(14) +#define ICE_GTP_PDU_LINK BIT(13) + +/* GTP attributes */ +#define ICE_GTP_PDU_FLAG_MASK (ICE_GTP_PDU) +#define ICE_GTP_PDU_EH ICE_GTP_PDU + +#define ICE_GTP_FLAGS_MASK (ICE_GTP_PDU | ICE_GTP_PDU_LINK) +#define ICE_GTP_SESSION 0 +#define ICE_GTP_DOWNLINK ICE_GTP_PDU +#define ICE_GTP_UPLINK (ICE_GTP_PDU | ICE_GTP_PDU_LINK) + +struct ice_ptype_attributes { + u16 ptype; + enum ice_ptype_attrib_type attrib; +}; + /* package labels */ struct ice_label { __le16 value; @@ -335,6 +402,7 @@ struct ice_es { u16 count; u16 fvw; u16 *ref_count; + u32 *mask_ena; struct list_head prof_map; struct ice_fv_word *t; struct mutex prof_map_lock; /* protect access to profiles list */ @@ -372,12 +440,14 @@ struct ice_prof_map { u8 prof_id; u8 ptg_cnt; u8 ptg[ICE_MAX_PTG_PER_PROFILE]; + struct ice_ptype_attrib_info attr[ICE_MAX_PTG_PER_PROFILE]; }; #define ICE_INVALID_TCAM 0xFFFF struct ice_tcam_inf { u16 tcam_idx; + struct ice_ptype_attrib_info attr; u8 ptg; u8 prof_id; u8 in_use; @@ -427,8 +497,8 @@ struct ice_xlt1 { #define ICE_PF_NUM_S 13 #define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) #define ICE_VSIG_VALUE(vsig, pf_id) \ - (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ - (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) + ((u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))) #define ICE_DEFAULT_VSIG 0 /* XLT2 Table */ @@ -478,6 +548,21 @@ struct ice_prof_redir { u16 count; }; +struct ice_mask { + u16 mask; /* 16-bit mask */ + u16 idx; /* index */ + u16 ref; /* reference count */ + u8 in_use; /* non-zero if used */ +}; + +struct ice_masks { + struct mutex lock; /* lock to protect this structure */ + u16 first; /* first mask owned by the PF */ + u16 count; /* number of masks owned by the PF */ +#define ICE_PROF_MASK_COUNT 32 + struct ice_mask masks[ICE_PROF_MASK_COUNT]; +}; + /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; @@ -485,6 +570,7 @@ struct ice_blk_info { struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; + struct ice_masks masks; u8 overwrite; /* set to true to allow overwrite of table entries */ u8 is_list_init; }; @@ -513,6 +599,7 @@ struct ice_chs_chg { u16 vsig; u16 orig_vsig; u16 tcam_idx; + struct ice_ptype_attrib_info attr; }; #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 89a0cef20506..f160672448a0 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -9,18 +9,50 @@ struct ice_flow_field_info { enum ice_flow_seg_hdr hdr; s16 off; /* Offset from start of a protocol header, in bits */ u16 size; /* Size of fields in bits */ + u16 mask; /* 16-bit mask for field */ }; #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \ .hdr = _hdr, \ .off = (_offset_bytes) * BITS_PER_BYTE, \ .size = (_size_bytes) * BITS_PER_BYTE, \ + .mask = 0, \ +} + +#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ + .hdr = _hdr, \ + .off = (_offset_bytes) * BITS_PER_BYTE, \ + .size = (_size_bytes) * BITS_PER_BYTE, \ + .mask = _mask, \ } /* Table containing properties of supported protocol header fields */ static const struct ice_flow_field_info 
ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { + /* Ether */ + /* ICE_FLOW_FIELD_IDX_ETH_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ETH_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_S_VLAN */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)), + /* ICE_FLOW_FIELD_IDX_C_VLAN */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)), + /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)), /* IPv4 / IPv6 */ + /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc), + /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0), + /* ICE_FLOW_FIELD_IDX_IPV4_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00), + /* ICE_FLOW_FIELD_IDX_IPV4_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00), /* ICE_FLOW_FIELD_IDX_IPV4_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)), /* ICE_FLOW_FIELD_IDX_IPV4_DA */ @@ -42,22 +74,112 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)), /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), + /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1), + /* ARP */ + /* ICE_FLOW_FIELD_IDX_ARP_SIP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)), + /* ICE_FLOW_FIELD_IDX_ARP_DIP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)), + /* ICE_FLOW_FIELD_IDX_ARP_SHA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ARP_DHA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ARP_OP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)), + /* ICMP */ + /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1), + /* ICE_FLOW_FIELD_IDX_ICMP_CODE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1), /* GRE */ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, sizeof_field(struct gre_full_hdr, key)), + /* GTP */ + /* ICE_FLOW_FIELD_IDX_GTPC_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16), + 0x3f00), + /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)), + /* PPPoE */ + /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)), + /* PFCP */ + /* ICE_FLOW_FIELD_IDX_PFCP_SEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)), + /* L2TPv3 */ + /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)), + /* ESP */ + /* ICE_FLOW_FIELD_IDX_ESP_SPI */ + 
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)), + /* AH */ + /* ICE_FLOW_FIELD_IDX_AH_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)), + /* NAT_T_ESP */ + /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)), }; /* Bitmaps indicating relevant packet types for a particular protocol header * - * Packet types for packets with an Outer/First/Single IPv4 header + * Packet types for packets with an Outer/First/Single MAC header + */ +static const u32 ice_ptypes_mac_ofos[] = { + 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, + 0x0000077E, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last MAC VLAN header */ +static const u32 ice_ptypes_macvlan_il[] = { + 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000, + 0x0000077E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT + * include IPv4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos[] = { 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x00000000, 0x00000155, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv4 header, includes + * IPv4 other PTYPEs + */ +static const u32 ice_ptypes_ipv4_ofos_all[] = { + 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x00000000, 0x00000155, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -67,7 +189,7 @@ static const u32 ice_ptypes_ipv4_ofos[] = { static const u32 ice_ptypes_ipv4_il[] = { 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 0x0000000E, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x001FF800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -75,11 +197,27 @@ static const u32 ice_ptypes_ipv4_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv6 header */ +/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT + * include IPv6 other PTYPEs + */ static const u32 ice_ptypes_ipv6_ofos[] = { 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x000002AA, 0x00000000, 0x00000000, + 0x00000000, 0x03F00000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv6 header, includes + * IPv6 other PTYPEs + */ +static const u32 ice_ptypes_ipv6_ofos_all[] = { + 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x000002AA, 0x00000000, 0x00000000, + 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -91,7 +229,7 @@ static const u32 ice_ptypes_ipv6_ofos[] = { static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -100,7 +238,7 @@ static const u32 ice_ptypes_ipv6_il[] = { }; /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */ -static const u32 ice_ipv4_ofos_no_l4[] = { +static const u32 ice_ptypes_ipv4_ofos_no_l4[] = { 0x10C00000, 0x04000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -111,8 +249,20 @@ static const u32 ice_ipv4_ofos_no_l4[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Outermost/First ARP header */ +static const u32 ice_ptypes_arp_of[] = { + 0x00000800, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */ -static const u32 ice_ipv4_il_no_l4[] = { +static const u32 ice_ptypes_ipv4_il_no_l4[] = { 0x60000000, 0x18043008, 0x80000002, 0x6010c021, 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -124,7 +274,7 @@ static const u32 ice_ipv4_il_no_l4[] = { }; /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */ -static const u32 ice_ipv6_ofos_no_l4[] = { +static const u32 ice_ptypes_ipv6_ofos_no_l4[] = { 0x00000000, 0x00000000, 0x43000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -136,7 +286,7 @@ static const u32 ice_ipv6_ofos_no_l4[] = { }; /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */ -static const u32 ice_ipv6_il_no_l4[] = { +static const u32 ice_ptypes_ipv6_il_no_l4[] = { 0x00000000, 0x02180430, 0x0000010c, 0x086010c0, 0x00000430, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -153,7 +303,7 @@ static const u32 ice_ipv6_il_no_l4[] = { static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00410000, 0x90842000, 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -165,7 +315,7 @@ static const u32 ice_ptypes_udp_il[] = { static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00820000, 0x21084000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -177,6 +327,18 @@ static const u32 ice_ptypes_tcp_il[] = { static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x01040000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outermost/First ICMP header */ +static const u32 ice_ptypes_icmp_of[] = { + 0x10000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -185,6 +347,18 @@ static const u32 ice_ptypes_sctp_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Innermost/Last ICMP header */ +static const u32 ice_ptypes_icmp_il[] = { + 0x00000000, 0x02040408, 0x40000102, 0x08101020, + 0x00000408, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x42108000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Packet types for packets with an Outermost/First GRE header */ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, @@ -197,6 +371,218 @@ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Innermost/Last MAC header */ +static const u32 ice_ptypes_mac_il[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPC */ +static const u32 ice_ptypes_gtpc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000180, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPC with TEID */ +static const u32 ice_ptypes_gtpc_tid[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000060, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPU */ +static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, +}; + +static const struct ice_ptype_attributes ice_attr_gtpu_down[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, +}; + +static const struct ice_ptype_attributes ice_attr_gtpu_up[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + 
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, +}; + +static const u32 ice_ptypes_gtpu[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for PPPoE */ +static const u32 ice_ptypes_pppoe[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x03ffe000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with PFCP NODE header */ +static const u32 ice_ptypes_pfcp_node[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x80000000, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with PFCP SESSION header */ +static const u32 ice_ptypes_pfcp_session[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000005, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for L2TPv3 */ +static const u32 ice_ptypes_l2tpv3[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000300, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for ESP */ +static const u32 ice_ptypes_esp[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000003, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
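/* Illustrative sketch, not part of the driver: the ice_ptypes_* tables in
 * this file are 32 x 32-bit words, i.e. one bit per packet type. A minimal
 * user-space model of the test/intersect operations (the kernel itself uses
 * test_bit()/bitmap_and() on the same data) could look like this.
 */
#include <stdbool.h>
#include <stdint.h>

#define PTYPE_WORDS	32	/* 32 words x 32 bits = 1024 packet types */

static bool ptype_test(const uint32_t *map, unsigned int ptype)
{
	return (map[ptype / 32] >> (ptype % 32)) & 1;
}

/* dst = a AND b: keep only packet types allowed by both header bitmaps */
static void ptype_and(uint32_t *dst, const uint32_t *a, const uint32_t *b)
{
	unsigned int i;

	for (i = 0; i < PTYPE_WORDS; i++)
		dst[i] = a[i] & b[i];
}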
0x00000000, 0x00000000, +}; + +/* Packet types for AH */ +static const u32 ice_ptypes_ah[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000000C, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with NAT_T ESP header */ +static const u32 ice_ptypes_nat_t_esp[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 ice_ptypes_mac_non_ip_ofos[] = { + 0x00000846, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x03FFF000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Manage parameters and info. used during the creation of a flow profile */ struct ice_flow_prof_params { enum ice_block blk; @@ -208,12 +594,30 @@ struct ice_flow_prof_params { * This will give us the direction flags. */ struct ice_fv_word es[ICE_MAX_FV_WORDS]; + /* attributes can be used to add attributes to a particular PTYPE */ + const struct ice_ptype_attributes *attr; + u16 attr_cnt; + + u16 mask[ICE_MAX_FV_WORDS]; DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); }; +#define ICE_FLOW_RSS_HDRS_INNER_MASK \ + (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \ + ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \ + ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \ + ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ + ICE_FLOW_SEG_HDR_NAT_T_ESP) + +#define ICE_FLOW_SEG_HDRS_L2_MASK \ + (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) #define ICE_FLOW_SEG_HDRS_L3_MASK \ - (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) + (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ + (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ + ICE_FLOW_SEG_HDR_SCTP) +/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */ +#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) /** @@ -243,8 +647,11 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) /* Sizes of fixed known protocol headers without header options */ #define ICE_FLOW_PROT_HDR_SZ_MAC 14 +#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2) #define ICE_FLOW_PROT_HDR_SZ_IPV4 20 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40 +#define ICE_FLOW_PROT_HDR_SZ_ARP 28 +#define ICE_FLOW_PROT_HDR_SZ_ICMP 8 #define ICE_FLOW_PROT_HDR_SZ_TCP 20 #define ICE_FLOW_PROT_HDR_SZ_UDP 8 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12 @@ -256,16 +663,27 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) */ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) { - u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC; + u16 sz; + + /* L2 headers */ 
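/* Illustrative worked example (user-space, not driver code) of the fixed
 * header-size summation that ice_flow_calc_seg_sz() finishes just below:
 * an ETH + IPv4 + TCP segment comes to 14 + 20 + 20 = 54 bytes, and a
 * VLAN-tagged segment would start from the *_MAC_VLAN define instead.
 * The HDR_SZ_* names are local to this sketch.
 */
#include <stdio.h>

#define HDR_SZ_MAC	14
#define HDR_SZ_IPV4	20
#define HDR_SZ_TCP	20

int main(void)
{
	unsigned int sz = HDR_SZ_MAC + HDR_SZ_IPV4 + HDR_SZ_TCP;

	printf("ETH+IPv4+TCP segment size: %u bytes\n", sz);	/* 54 */
	return 0;
}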
+ sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? + ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC; /* L3 headers */ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) sz += ICE_FLOW_PROT_HDR_SZ_IPV4; else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) sz += ICE_FLOW_PROT_HDR_SZ_IPV6; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) + sz += ICE_FLOW_PROT_HDR_SZ_ARP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) + /* An L3 header is required if L4 is specified */ + return 0; /* L4 headers */ - if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) + sz += ICE_FLOW_PROT_HDR_SZ_ICMP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) sz += ICE_FLOW_PROT_HDR_SZ_TCP; else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) sz += ICE_FLOW_PROT_HDR_SZ_UDP; @@ -298,10 +716,41 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) hdrs = prof->segs[i].hdrs; + if (hdrs & ICE_FLOW_SEG_HDR_ETH) { + src = !i ? (const unsigned long *)ice_ptypes_mac_ofos : + (const unsigned long *)ice_ptypes_mac_il; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) { + src = (const unsigned long *)ice_ptypes_macvlan_il; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) { + bitmap_and(params->ptypes, params->ptypes, + (const unsigned long *)ice_ptypes_arp_of, + ICE_FLOW_PTYPE_MAX); + } + if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && - !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) { - src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 : - (const unsigned long *)ice_ipv4_il_no_l4; + (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { + src = i ? (const unsigned long *)ice_ptypes_ipv4_il : + (const unsigned long *)ice_ptypes_ipv4_ofos_all; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && + (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { + src = i ? (const unsigned long *)ice_ptypes_ipv6_il : + (const unsigned long *)ice_ptypes_ipv6_ofos_all; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && + !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { + src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos_no_l4 : + (const unsigned long *)ice_ptypes_ipv4_il_no_l4; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) { @@ -310,9 +759,9 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && - !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) { - src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 : - (const unsigned long *)ice_ipv6_il_no_l4; + !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { + src = !i ? 
(const unsigned long *)ice_ptypes_ipv6_ofos_no_l4 : + (const unsigned long *)ice_ptypes_ipv6_il_no_l4; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) { @@ -322,6 +771,20 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) ICE_FLOW_PTYPE_MAX); } + if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) { + src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) { + src = (const unsigned long *)ice_ptypes_pppoe; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else { + src = (const unsigned long *)ice_ptypes_pppoe; + bitmap_andnot(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + if (hdrs & ICE_FLOW_SEG_HDR_UDP) { src = (const unsigned long *)ice_ptypes_udp_il; bitmap_and(params->ptypes, params->ptypes, src, @@ -334,12 +797,89 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) src = (const unsigned long *)ice_ptypes_sctp_il; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_ICMP) { + src = !i ? (const unsigned long *)ice_ptypes_icmp_of : + (const unsigned long *)ice_ptypes_icmp_il; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { if (!i) { src = (const unsigned long *)ice_ptypes_gre_of; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) { + src = (const unsigned long *)ice_ptypes_gtpc; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) { + src = (const unsigned long *)ice_ptypes_gtpc_tid; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with downlink */ + params->attr = ice_attr_gtpu_down; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with uplink */ + params->attr = ice_attr_gtpu_up; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with Extension Header */ + params->attr = ice_attr_gtpu_eh; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) { + src = (const unsigned long *)ice_ptypes_l2tpv3; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) { + src = (const unsigned long *)ice_ptypes_esp; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_AH) { + src = (const unsigned long *)ice_ptypes_ah; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) { + src = (const unsigned long 
*)ice_ptypes_nat_t_esp; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_PFCP) { + if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE) + src = (const unsigned long *)ice_ptypes_pfcp_node; + else + src = (const unsigned long *)ice_ptypes_pfcp_session; + + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else { + src = (const unsigned long *)ice_ptypes_pfcp_node; + bitmap_andnot(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + src = (const unsigned long *)ice_ptypes_pfcp_session; + bitmap_andnot(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); } } @@ -352,6 +892,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) * @params: information about the flow to be processed * @seg: packet segment index of the field to be extracted * @fld: ID of field to be extracted + * @match: bit field of all fields * * This function determines the protocol ID, offset, and size of the given * field. It then allocates one or more extraction sequence entries for the @@ -359,17 +900,73 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) */ static enum ice_status ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, - u8 seg, enum ice_flow_field fld) + u8 seg, enum ice_flow_field fld, u64 match) { + enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; u8 fv_words = hw->blk[params->blk].es.fvw; struct ice_flow_fld_info *flds; u16 cnt, ese_bits, i; + u16 sib_mask = 0; + u16 mask; u16 off; flds = params->prof->segs[seg].fields; switch (fld) { + case ICE_FLOW_FIELD_IDX_ETH_DA: + case ICE_FLOW_FIELD_IDX_ETH_SA: + case ICE_FLOW_FIELD_IDX_S_VLAN: + case ICE_FLOW_FIELD_IDX_C_VLAN: + prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; + break; + case ICE_FLOW_FIELD_IDX_ETH_TYPE: + prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV4_DSCP: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV6_DSCP: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV4_TTL: + case ICE_FLOW_FIELD_IDX_IPV4_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. + */ + if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) + sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; + else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT) + sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; + + /* If the sibling field is also included, that field's + * mask needs to be included. + */ + if (match & BIT(sib)) + sib_mask = ice_flds_info[sib].mask; + break; + case ICE_FLOW_FIELD_IDX_IPV6_TTL: + case ICE_FLOW_FIELD_IDX_IPV6_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. + */ + if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) + sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; + else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT) + sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; + + /* If the sibling field is also included, that field's + * mask needs to be included. + */ + if (match & BIT(sib)) + sib_mask = ice_flds_info[sib].mask; + break; case ICE_FLOW_FIELD_IDX_IPV4_SA: case ICE_FLOW_FIELD_IDX_IPV4_DA: prot_id = seg == 0 ? 
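/* Illustrative sketch, not part of the driver: IPv4 TTL and protocol are
 * "siblings" that live in the same 16-bit extraction word, so when both are
 * requested only one extraction entry is consumed and the two field masks
 * are OR'ed together. The byte layout assumed here (TTL in the high byte,
 * protocol in the low byte) is for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define TTL_MASK	0xFF00u		/* high byte of the shared word */
#define PROT_MASK	0x00FFu		/* low byte of the shared word */

int main(void)
{
	uint16_t mask = TTL_MASK;	/* field being extracted */

	mask |= PROT_MASK;		/* sibling also matched: OR its mask in */
	printf("combined extraction mask: 0x%04x\n", (unsigned int)mask);	/* 0xffff */
	return 0;
}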
ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; @@ -380,6 +977,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, break; case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: + case ICE_FLOW_FIELD_IDX_TCP_FLAGS: prot_id = ICE_PROT_TCP_IL; break; case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: @@ -390,6 +988,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: prot_id = ICE_PROT_SCTP_IL; break; + case ICE_FLOW_FIELD_IDX_GTPC_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI: + /* GTP is accessed through UDP OF protocol */ + prot_id = ICE_PROT_UDP_OF; + break; + case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID: + prot_id = ICE_PROT_PPPOE; + break; + case ICE_FLOW_FIELD_IDX_PFCP_SEID: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID: + prot_id = ICE_PROT_L2TPV3; + break; + case ICE_FLOW_FIELD_IDX_ESP_SPI: + prot_id = ICE_PROT_ESP_F; + break; + case ICE_FLOW_FIELD_IDX_AH_SPI: + prot_id = ICE_PROT_ESP_2; + break; + case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_ARP_SIP: + case ICE_FLOW_FIELD_IDX_ARP_DIP: + case ICE_FLOW_FIELD_IDX_ARP_SHA: + case ICE_FLOW_FIELD_IDX_ARP_DHA: + case ICE_FLOW_FIELD_IDX_ARP_OP: + prot_id = ICE_PROT_ARP_OF; + break; + case ICE_FLOW_FIELD_IDX_ICMP_TYPE: + case ICE_FLOW_FIELD_IDX_ICMP_CODE: + /* ICMP type and code share the same extraction seq. entry */ + prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ? + ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; + sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? + ICE_FLOW_FIELD_IDX_ICMP_CODE : + ICE_FLOW_FIELD_IDX_ICMP_TYPE; + break; case ICE_FLOW_FIELD_IDX_GRE_KEYID: prot_id = ICE_PROT_GRE_OF; break; @@ -407,6 +1048,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, ICE_FLOW_FV_EXTRACT_SZ; flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); flds[fld].xtrct.idx = params->es_cnt; + flds[fld].xtrct.mask = ice_flds_info[fld].mask; /* Adjust the next field-entry index after accommodating the number of * entries this field consumes @@ -416,24 +1058,34 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, /* Fill in the extraction sequence entries needed for this field */ off = flds[fld].xtrct.off; + mask = flds[fld].xtrct.mask; for (i = 0; i < cnt; i++) { - u8 idx; - - /* Make sure the number of extraction sequence required - * does not exceed the block's capability + /* Only consume an extraction sequence entry if there is no + * sibling field associated with this field or the sibling entry + * already extracts the word shared with this field. 
*/ - if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + if (sib == ICE_FLOW_FIELD_IDX_MAX || + flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || + flds[sib].xtrct.off != off) { + u8 idx; - /* some blocks require a reversed field vector layout */ - if (hw->blk[params->blk].es.reverse) - idx = fv_words - params->es_cnt - 1; - else - idx = params->es_cnt; + /* Make sure the number of extraction sequence required + * does not exceed the block's capability + */ + if (params->es_cnt >= fv_words) + return ICE_ERR_MAX_LIMIT; - params->es[idx].prot_id = prot_id; - params->es[idx].off = off; - params->es_cnt++; + /* some blocks require a reversed field vector layout */ + if (hw->blk[params->blk].es.reverse) + idx = fv_words - params->es_cnt - 1; + else + idx = params->es_cnt; + + params->es[idx].prot_id = prot_id; + params->es[idx].off = off; + params->mask[idx] = mask | sib_mask; + params->es_cnt++; + } off += ICE_FLOW_FV_EXTRACT_SZ; } @@ -533,14 +1185,15 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, u8 i; for (i = 0; i < prof->segs_cnt; i++) { - u8 j; + u64 match = params->prof->segs[i].match; + enum ice_flow_field j; - for_each_set_bit(j, (unsigned long *)&prof->segs[i].match, + for_each_set_bit(j, (unsigned long *)&match, ICE_FLOW_FIELD_IDX_MAX) { - status = ice_flow_xtract_fld(hw, params, i, - (enum ice_flow_field)j); + status = ice_flow_xtract_fld(hw, params, i, j, match); if (status) return status; + clear_bit(j, (unsigned long *)&match); } /* Process raw matching bytes */ @@ -751,7 +1404,8 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, - params->es); + params->attr, params->attr_cnt, params->es, + params->mask); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1158,6 +1812,9 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, seg->raws_cnt++; } +#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ + (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) + #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) @@ -1165,7 +1822,8 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ - (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ + (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \ + ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ ICE_FLOW_RSS_SEG_HDR_L4_MASKS) /** @@ -1193,7 +1851,8 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, ICE_FLOW_SET_HDRS(segs, flow_hdr); - if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS) + if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & + ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) return ICE_ERR_PARAM; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); @@ -1349,9 +2008,9 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled */ #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \ - (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ - (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ - ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)) + ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ + (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ + ((u8)((segs_cnt) - 1) ? 
ICE_FLOW_PROF_ENCAP_M : 0))) /** * ice_add_rss_cfg_sync - add an RSS configuration @@ -1490,6 +2149,94 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, return status; } +/** + * ice_rem_rss_cfg_sync - remove an existing RSS configuration + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove + * @addl_hdrs: Protocol header fields within a packet segment + * @segs_cnt: packet segment count + * + * Assumption: lock has already been acquired for RSS list + */ +static enum ice_status +ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs, u8 segs_cnt) +{ + const enum ice_block blk = ICE_BLK_RSS; + struct ice_flow_seg_info *segs; + struct ice_flow_prof *prof; + enum ice_status status; + + segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); + if (!segs) + return ICE_ERR_NO_MEMORY; + + /* Construct the packet segment info from the hashed fields */ + status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, + addl_hdrs); + if (status) + goto out; + + prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, + vsi_handle, + ICE_FLOW_FIND_PROF_CHK_FLDS); + if (!prof) { + status = ICE_ERR_DOES_NOT_EXIST; + goto out; + } + + status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); + if (status) + goto out; + + /* Remove RSS configuration from VSI context before deleting + * the flow profile. + */ + ice_rem_rss_list(hw, vsi_handle, prof); + + if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) + status = ice_flow_rem_prof(hw, blk, prof->id); + +out: + kfree(segs); + return status; +} + +/** + * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove + * @addl_hdrs: Protocol header fields within a packet segment + * + * This function will lookup the flow profile based on the input + * hash field bitmap, iterate through the profile entry list of + * that profile and find entry associated with input VSI to be + * removed. Calls are made to underlying flow APIs which will in + * turn build or update buffers for RSS XLT1 section. + */ +enum ice_status __maybe_unused +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs) +{ + enum ice_status status; + + if (hashed_flds == ICE_HASH_INVALID || + !ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + mutex_lock(&hw->rss_locks); + status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, + ICE_RSS_OUTER_HEADERS); + if (!status) + status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, + addl_hdrs, ICE_RSS_INNER_HEADERS); + mutex_unlock(&hw->rss_locks); + + return status; +} + /* Mapping of AVF hash bit fields to an L3-L4 hash combination. * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash, * convert its values to their appropriate flow L3, L4 values.
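A brief aside on the ICE_FLOW_GEN_PROFID() macro parenthesized above: it packs the hashed fields, the protocol-header bitmap and an encapsulation flag into a single 64-bit profile ID. The sketch below is plain user-space C with assumed mask and shift values (PROF_HASH_M, PROF_HDR_S, PROF_HDR_M and PROF_ENCAP_M are illustrative names, not the driver's defines), shown only to make the packing rule concrete.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: hash in the low 32 bits, headers above it, bit 63 = encap */
#define PROF_HASH_M	0xFFFFFFFFull
#define PROF_HDR_S	32
#define PROF_HDR_M	(0x7FFFFFFFull << PROF_HDR_S)
#define PROF_ENCAP_M	(1ull << 63)

static uint64_t gen_profid(uint64_t hash, uint64_t hdr, uint8_t segs_cnt)
{
	/* more than one packet segment means the configuration is tunneled */
	return (hash & PROF_HASH_M) |
	       ((hdr << PROF_HDR_S) & PROF_HDR_M) |
	       (segs_cnt > 1 ? PROF_ENCAP_M : 0);
}

int main(void)
{
	printf("profid: 0x%016" PRIx64 "\n", gen_profid(0x86ull, 0xc4ull, 2));
	return 0;
}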
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 829f90b1e998..2a2d8c1536cb 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -8,6 +8,9 @@ #define ICE_FLOW_FLD_OFF_INVAL 0xffff /* Generate flow hash field from flow field type(s) */ +#define ICE_FLOW_HASH_ETH \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)) #define ICE_FLOW_HASH_IPV4 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)) @@ -30,6 +33,80 @@ #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_FLOW_HASH_GTP_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) + +#define ICE_FLOW_HASH_GTP_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID) +#define ICE_FLOW_HASH_GTP_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID) + +#define ICE_FLOW_HASH_GTP_U_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)) + +#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID) +#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID) + +#define ICE_FLOW_HASH_GTP_U_EH_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)) + +#define ICE_FLOW_HASH_GTP_U_EH_QFI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI)) + +#define ICE_FLOW_HASH_GTP_U_IPV4_EH \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ + ICE_FLOW_HASH_GTP_U_EH_QFI) +#define ICE_FLOW_HASH_GTP_U_IPV6_EH \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ + ICE_FLOW_HASH_GTP_U_EH_QFI) + +#define ICE_FLOW_HASH_PPPOE_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)) + +#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID) +#define ICE_FLOW_HASH_PPPOE_TCP_ID \ + (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) +#define ICE_FLOW_HASH_PPPOE_UDP_ID \ + (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) + +#define ICE_FLOW_HASH_PFCP_SEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)) +#define ICE_FLOW_HASH_PFCP_IPV4_SEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID) +#define ICE_FLOW_HASH_PFCP_IPV6_SEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID) + +#define ICE_FLOW_HASH_L2TPV3_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID) +#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID) + +#define ICE_FLOW_HASH_ESP_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)) +#define ICE_FLOW_HASH_ESP_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI) +#define ICE_FLOW_HASH_ESP_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI) + +#define ICE_FLOW_HASH_AH_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)) +#define ICE_FLOW_HASH_AH_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI) +#define ICE_FLOW_HASH_AH_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI) + +#define ICE_FLOW_HASH_NAT_T_ESP_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI)) +#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI) +#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) + /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. 
Each * logical group of protocol headers encapsulates or is encapsulated using/by @@ -38,16 +115,66 @@ */ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_NONE = 0x00000000, + ICE_FLOW_SEG_HDR_ETH = 0x00000001, + ICE_FLOW_SEG_HDR_VLAN = 0x00000002, ICE_FLOW_SEG_HDR_IPV4 = 0x00000004, ICE_FLOW_SEG_HDR_IPV6 = 0x00000008, + ICE_FLOW_SEG_HDR_ARP = 0x00000010, + ICE_FLOW_SEG_HDR_ICMP = 0x00000020, ICE_FLOW_SEG_HDR_TCP = 0x00000040, ICE_FLOW_SEG_HDR_UDP = 0x00000080, ICE_FLOW_SEG_HDR_SCTP = 0x00000100, ICE_FLOW_SEG_HDR_GRE = 0x00000200, + ICE_FLOW_SEG_HDR_GTPC = 0x00000400, + ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, + ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000, + ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000, + ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, + ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000, + ICE_FLOW_SEG_HDR_PPPOE = 0x00010000, + ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000, + ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000, + ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000, + ICE_FLOW_SEG_HDR_ESP = 0x00100000, + ICE_FLOW_SEG_HDR_AH = 0x00200000, + ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, + ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000, + /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and + * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs + */ + ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000, }; +/* These segments all have the same PTYPES, but are otherwise distinguished by + * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags: + * + * gtp_eh_pdu gtp_eh_pdu_link + * ICE_FLOW_SEG_HDR_GTPU_IP 0 0 + * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care + * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0 + * ICE_FLOW_SEG_HDR_GTPU_UP 1 1 + */ +#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \ + ICE_FLOW_SEG_HDR_GTPU_EH | \ + ICE_FLOW_SEG_HDR_GTPU_DWN | \ + ICE_FLOW_SEG_HDR_GTPU_UP) +#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \ + ICE_FLOW_SEG_HDR_PFCP_SESSION) + enum ice_flow_field { + /* L2 */ + ICE_FLOW_FIELD_IDX_ETH_DA, + ICE_FLOW_FIELD_IDX_ETH_SA, + ICE_FLOW_FIELD_IDX_S_VLAN, + ICE_FLOW_FIELD_IDX_C_VLAN, + ICE_FLOW_FIELD_IDX_ETH_TYPE, /* L3 */ + ICE_FLOW_FIELD_IDX_IPV4_DSCP, + ICE_FLOW_FIELD_IDX_IPV6_DSCP, + ICE_FLOW_FIELD_IDX_IPV4_TTL, + ICE_FLOW_FIELD_IDX_IPV4_PROT, + ICE_FLOW_FIELD_IDX_IPV6_TTL, + ICE_FLOW_FIELD_IDX_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, @@ -59,9 +186,42 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, + ICE_FLOW_FIELD_IDX_TCP_FLAGS, + /* ARP */ + ICE_FLOW_FIELD_IDX_ARP_SIP, + ICE_FLOW_FIELD_IDX_ARP_DIP, + ICE_FLOW_FIELD_IDX_ARP_SHA, + ICE_FLOW_FIELD_IDX_ARP_DHA, + ICE_FLOW_FIELD_IDX_ARP_OP, + /* ICMP */ + ICE_FLOW_FIELD_IDX_ICMP_TYPE, + ICE_FLOW_FIELD_IDX_ICMP_CODE, /* GRE */ ICE_FLOW_FIELD_IDX_GRE_KEYID, - /* The total number of enums must not exceed 64 */ + /* GTPC_TEID */ + ICE_FLOW_FIELD_IDX_GTPC_TEID, + /* GTPU_IP */ + ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, + /* GTPU_EH */ + ICE_FLOW_FIELD_IDX_GTPU_EH_TEID, + ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, + /* GTPU_UP */ + ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, + /* GTPU_DWN */ + ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, + /* PPPoE */ + ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, + /* PFCP */ + ICE_FLOW_FIELD_IDX_PFCP_SEID, + /* L2TPv3 */ + ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, + /* ESP */ + ICE_FLOW_FIELD_IDX_ESP_SPI, + /* AH */ + ICE_FLOW_FIELD_IDX_AH_SPI, + /* NAT_T ESP */ + ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; @@ -138,6 +298,7 @@ struct ice_flow_seg_xtrct { u16 off; /* Starting offset of 
the field in header in bytes */ u8 idx; /* Index of FV entry used */ u8 disp; /* Displacement of field in bits fr. FV entry's start */ + u16 mask; /* Mask for field */ }; enum ice_flow_fld_match_type { @@ -248,5 +409,8 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); +enum ice_status +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); #endif /* _ICE_FLOW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 093a1818a392..de38a0fc9665 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -130,6 +130,7 @@ #define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3) #define GLINT_DYN_CTL_INTERVAL_S 5 #define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5) +#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) #define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) @@ -306,8 +307,23 @@ #define GLQF_FD_SIZE_FD_BSIZE_S 16 #define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16) #define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) +#define GLQF_FDMASK_MAX_INDEX 31 +#define GLQF_FDMASK_MSK_INDEX_S 0 +#define GLQF_FDMASK_MSK_INDEX_M ICE_M(0x1F, 0) +#define GLQF_FDMASK_MASK_S 16 +#define GLQF_FDMASK_MASK_M ICE_M(0xFFFF, 16) #define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) #define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4)) +#define GLQF_HMASK_MAX_INDEX 31 +#define GLQF_HMASK_MSK_INDEX_S 0 +#define GLQF_HMASK_MSK_INDEX_M ICE_M(0x1F, 0) +#define GLQF_HMASK_MASK_S 16 +#define GLQF_HMASK_MASK_M ICE_M(0xFFFF, 16) +#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) +#define GLQF_HMASK_SEL_MAX_INDEX 127 +#define GLQF_HMASK_SEL_MASK_SEL_S 0 #define PFQF_FD_ENA 0x0043A000 #define PFQF_FD_ENA_FD_ENA_M BIT(0) #define PFQF_FD_SIZE 0x00460100 @@ -369,6 +385,9 @@ #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) +#define VSIQF_FD_CNT_FD_BCNT_S 16 +#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16) +#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 #define PFPM_APM 0x000B8080 diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 4ec24c3e813f..21329ed3087e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -55,6 +55,7 @@ struct ice_fltr_desc { #define ICE_FXD_FLTR_QW0_COMP_REPORT_M \ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S) #define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL +#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL #define ICE_FXD_FLTR_QW0_FD_SPACE_S 14 #define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S) @@ -128,6 +129,7 @@ struct ice_fltr_desc { #define ICE_FXD_FLTR_QW1_FDID_PRI_S 25 #define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S) #define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL +#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL #define ICE_FXD_FLTR_QW1_FDID_MDID_S 28 #define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S) @@ 
-138,6 +140,26 @@ struct ice_fltr_desc { (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S) #define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL +/* definition for FD filter programming status descriptor WB format */ +#define ICE_FXD_FLTR_WB_QW1_DD_S 0 +#define ICE_FXD_FLTR_WB_QW1_DD_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_DD_S) +#define ICE_FXD_FLTR_WB_QW1_DD_YES 0x1ULL + +#define ICE_FXD_FLTR_WB_QW1_PROG_ID_S 1 +#define ICE_FXD_FLTR_WB_QW1_PROG_ID_M \ + (0x3ULL << ICE_FXD_FLTR_WB_QW1_PROG_ID_S) +#define ICE_FXD_FLTR_WB_QW1_PROG_ADD 0x0ULL +#define ICE_FXD_FLTR_WB_QW1_PROG_DEL 0x1ULL + +#define ICE_FXD_FLTR_WB_QW1_FAIL_S 4 +#define ICE_FXD_FLTR_WB_QW1_FAIL_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_S) +#define ICE_FXD_FLTR_WB_QW1_FAIL_YES 0x1ULL + +#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S 5 +#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M \ + (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) +#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL + struct ice_rx_ptype_decoded { u32 ptype:10; u32 known:1; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index d13c7fc8fb0a..82e2ce23df3d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -158,6 +158,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) if (vsi->type == ICE_VSI_VF) vsi->vf_id = vf_id; + else + vsi->vf_id = ICE_INVAL_VFID; switch (vsi->type) { case ICE_VSI_PF: @@ -343,6 +345,9 @@ static int ice_vsi_clear(struct ice_vsi *vsi) pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) pf->next_vsi = vsi->idx; + if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && + vsi->vf_id != ICE_INVAL_VFID) + pf->next_vsi = vsi->idx; ice_vsi_free_arrays(vsi); mutex_unlock(&pf->sw_mutex); @@ -382,6 +387,8 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; + q_vector->total_events++; + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -419,7 +426,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) vsi->type = vsi_type; vsi->back = pf; - set_bit(__ICE_DOWN, vsi->state); + set_bit(ICE_VSI_DOWN, vsi->state); if (vsi_type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); @@ -454,8 +461,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) goto unlock_pf; } - if (vsi->type == ICE_VSI_CTRL) { - /* Use the last VSI slot as the index for the control VSI */ + if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) { + /* Use the last VSI slot as the index for PF control VSI */ vsi->idx = pf->num_alloc_vsi - 1; pf->ctrl_vsi_idx = vsi->idx; pf->vsi[vsi->idx] = vsi; @@ -468,6 +475,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, pf->next_vsi); } + + if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID) + pf->vf[vf_id].ctrl_vsi_idx = vsi->idx; goto unlock_pf; err_rings: @@ -506,7 +516,7 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) if (!b_val) return -EPERM; - if (vsi->type != ICE_VSI_PF) + if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF)) return -EPERM; if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) @@ -517,6 +527,13 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) /* each VSI gets same "best_effort" quota */ vsi->num_bfltr = b_val; + if (vsi->type == ICE_VSI_VF) { + vsi->num_gfltr = 0; + + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } + return 0; } @@ -729,11 +746,10 @@ static void 
ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) */ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - u16 offset = 0, qmap = 0, tx_count = 0; + u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; + u16 num_txq_per_tc, num_rxq_per_tc; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; - u16 tx_numq_tc, rx_numq_tc; - u16 pow = 0, max_rss = 0; bool ena_tc0 = false; u8 netdev_tc = 0; int i; @@ -751,12 +767,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) vsi->tc_cfg.ena_tc |= 1; } - rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc; - if (!rx_numq_tc) - rx_numq_tc = 1; - tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc; - if (!tx_numq_tc) - tx_numq_tc = 1; + num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); + if (!num_rxq_per_tc) + num_rxq_per_tc = 1; + num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; + if (!num_txq_per_tc) + num_txq_per_tc = 1; + + /* find the (rounded up) power-of-2 of qcount */ + pow = (u16)order_base_2(num_rxq_per_tc); /* TC mapping is a function of the number of Rx queues assigned to the * VSI for each traffic class and the offset of these queues. @@ -769,26 +788,6 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) * * Setup number and offset of Rx queues for all TCs for the VSI */ - - qcount_rx = rx_numq_tc; - - /* qcount will change if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { - if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { - if (vsi->type == ICE_VSI_PF) - max_rss = ICE_MAX_LG_RSS_QS; - else - max_rss = ICE_MAX_RSS_QS_PER_VF; - qcount_rx = min_t(u16, rx_numq_tc, max_rss); - if (!vsi->req_rxq) - qcount_rx = min_t(u16, qcount_rx, - vsi->rss_size); - } - } - - /* find the (rounded up) power-of-2 of qcount */ - pow = (u16)order_base_2(qcount_rx); - ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { /* TC is not enabled */ @@ -802,16 +801,16 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) /* TC is enabled */ vsi->tc_cfg.tc_info[i].qoffset = offset; - vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; - vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc; + vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; + vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M) | ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); - offset += qcount_rx; - tx_count += tx_numq_tc; + offset += num_rxq_per_tc; + tx_count += num_txq_per_tc; ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } @@ -824,7 +823,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) if (offset) vsi->num_rxq = offset; else - vsi->num_rxq = qcount_rx; + vsi->num_rxq = num_rxq_per_tc; vsi->num_txq = tx_count; @@ -856,7 +855,8 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) u8 dflt_q_group, dflt_q_prio; u16 dflt_q, report_q, val; - if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL) + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && + vsi->type != ICE_VSI_VF) return; val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; @@ -1179,7 +1179,24 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); + if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { + struct ice_vf *vf; + int i; 
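/* Illustrative sketch, not driver code: how ice_vsi_setup_q_map() in this
 * hunk derives per-TC queue counts and the qmap field. The Q_OFFSET_*/
 /* Q_NUM_* shift and mask values are assumptions of this sketch, and
 * order_base_2() is re-implemented locally (smallest p with 2^p >= n).
 */
#include <stdint.h>
#include <stdio.h>

#define Q_OFFSET_S	0
#define Q_OFFSET_M	0x7FF
#define Q_NUM_S		11
#define Q_NUM_M		(0xF << Q_NUM_S)

static unsigned int order_base_2(unsigned int n)
{
	unsigned int p = 0;

	while ((1u << p) < n)
		p++;
	return p;
}

int main(void)
{
	unsigned int rxq = 16, numtc = 3;
	unsigned int per_tc = rxq / numtc;		/* 5 queues per TC */
	unsigned int pow = order_base_2(per_tc);	/* 3: block rounded up to 8 */
	uint16_t qmap = ((0 << Q_OFFSET_S) & Q_OFFSET_M) |
			((pow << Q_NUM_S) & Q_NUM_M);

	printf("per_tc=%u pow=%u qmap(TC0)=0x%04x\n", per_tc, pow,
	       (unsigned int)qmap);
	return 0;
}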
+ + ice_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) { + base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; + break; + } + } + if (i == pf->num_alloc_vfs) + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + ICE_RES_VF_CTRL_VEC_ID); + } else { + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + vsi->idx); + } if (base < 0) { dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n", @@ -1296,14 +1313,13 @@ err_out: * LUT, while in the event of enable request for RSS, it will reconfigure RSS * LUT. */ -int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) +void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) { - int err = 0; u8 *lut; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) - return -ENOMEM; + return; if (ena) { if (vsi->rss_lut_user) @@ -1313,9 +1329,8 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) vsi->rss_size); } - err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); + ice_set_rss_lut(vsi, lut, vsi->rss_table_size); kfree(lut); - return err; } /** @@ -1324,12 +1339,10 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) */ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) { - struct ice_aqc_get_set_rss_keys *key; struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; - int err = 0; - u8 *lut; + u8 *lut, *key; + int err; dev = ice_pf_to_dev(pf); vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); @@ -1343,37 +1356,26 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) else ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); - status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut, - vsi->rss_table_size); - - if (status) { - dev_err(dev, "set_rss_lut failed, error %s\n", - ice_stat_str(status)); - err = -EIO; + err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); + if (err) { + dev_err(dev, "set_rss_lut failed, error %d\n", err); goto ice_vsi_cfg_rss_exit; } - key = kzalloc(sizeof(*key), GFP_KERNEL); + key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL); if (!key) { err = -ENOMEM; goto ice_vsi_cfg_rss_exit; } if (vsi->rss_hkey_user) - memcpy(key, - (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user, - ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); + memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); else - netdev_rss_key_fill((void *)key, - ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); + netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); - status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); - - if (status) { - dev_err(dev, "set_rss_key failed, error %s\n", - ice_stat_str(status)); - err = -EIO; - } + err = ice_set_rss_key(vsi, key); + if (err) + dev_err(dev, "set_rss_key failed, error %d\n", err); kfree(key); ice_vsi_cfg_rss_exit: @@ -1502,13 +1504,13 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) */ bool ice_pf_state_is_nominal(struct ice_pf *pf) { - DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; + DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 }; if (!pf) return false; - bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); - if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) + bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) return false; return true; @@ -1773,7 +1775,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) * This function converts a decimal interrupt rate limit in usecs to the format * expected by firmware. 
*/ -u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) +static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) { u32 val = intrl / gran; @@ -1783,6 +1785,51 @@ u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) } /** + * ice_write_intrl - write throttle rate limit to interrupt specific register + * @q_vector: pointer to interrupt specific structure + * @intrl: throttle rate limit in microseconds to write + */ +void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) +{ + struct ice_hw *hw = &q_vector->vsi->back->hw; + + wr32(hw, GLINT_RATE(q_vector->reg_idx), + ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); +} + +/** + * __ice_write_itr - write throttle rate to register + * @q_vector: pointer to interrupt data structure + * @rc: pointer to ring container + * @itr: throttle rate in microseconds to write + */ +static void __ice_write_itr(struct ice_q_vector *q_vector, + struct ice_ring_container *rc, u16 itr) +{ + struct ice_hw *hw = &q_vector->vsi->back->hw; + + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), + ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S); +} + +/** + * ice_write_itr - write throttle rate to queue specific register + * @rc: pointer to ring container + * @itr: throttle rate in microseconds to write + */ +void ice_write_itr(struct ice_ring_container *rc, u16 itr) +{ + struct ice_q_vector *q_vector; + + if (!rc->ring) + return; + + q_vector = rc->ring->q_vector; + + __ice_write_itr(q_vector, rc, itr); +} + +/** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured * @@ -1802,9 +1849,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ice_cfg_itr(hw, q_vector); - wr32(hw, GLINT_RATE(reg_idx), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); - /* Both Transmit Queue Interrupt Cause Control register * and Receive Queue Interrupt Cause control register * expects MSIX_INDX field to be the vector index @@ -2308,7 +2352,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi *vsi; int ret, i; - if (vsi_type == ICE_VSI_VF) + if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) vsi = ice_vsi_alloc(pf, vsi_type, vf_id); else vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID); @@ -2323,7 +2367,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (vsi->type == ICE_VSI_PF) vsi->ethtype = ETH_P_PAUSE; - if (vsi->type == ICE_VSI_VF) + if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL) vsi->vf_id = vf_id; ice_alloc_fd_res(vsi); @@ -2492,11 +2536,10 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; - u16 reg_idx = q_vector->reg_idx; - wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0); - wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0); + ice_write_intrl(q_vector, 0); for (q = 0; q < q_vector->num_ring_tx; q++) { + ice_write_itr(&q_vector->tx, 0); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); if (ice_is_xdp_ena_vsi(vsi)) { u32 xdp_txq = txq + vsi->num_xdp_txq; @@ -2507,6 +2550,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) } for (q = 0; q < q_vector->num_ring_rx; q++) { + ice_write_itr(&q_vector->rx, 0); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); rxq++; } @@ -2593,7 +2637,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi) */ void ice_vsi_close(struct ice_vsi *vsi) { - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) + if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) ice_down(vsi); ice_vsi_free_irq(vsi); @@ -2610,10 +2654,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) { int err = 0; - if 
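/* Illustrative sketch, not driver code: the general shape of the
 * microseconds-to-register conversion documented just above. Dividing the
 * requested limit by the hardware granularity gives the register units;
 * the RATE_INTRL_ENA bit used here is an assumption of this sketch.
 */
#include <stdint.h>

#define RATE_INTRL_ENA	(1u << 6)	/* assumed enable bit */

static uint32_t intrl_usec_to_reg(uint8_t intrl, uint8_t gran)
{
	uint32_t val = intrl / gran;	/* granularity units */

	return val ? (val | RATE_INTRL_ENA) : 0;	/* 0 leaves rate limiting off */
}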
(!test_bit(__ICE_NEEDS_RESTART, vsi->state)) + if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) return 0; - clear_bit(__ICE_NEEDS_RESTART, vsi->state); + clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); if (vsi->netdev && vsi->type == ICE_VSI_PF) { if (netif_running(vsi->netdev)) { @@ -2639,10 +2683,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) */ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) { - if (test_bit(__ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return; - set_bit(__ICE_NEEDS_RESTART, vsi->state); + set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); if (vsi->type == ICE_VSI_PF && vsi->netdev) { if (netif_running(vsi->netdev)) { @@ -2752,11 +2796,14 @@ int ice_vsi_release(struct ice_vsi *vsi) * PF that is running the work queue items currently. This is done to * avoid check_flush_dependency() warning on this wq */ - if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { + if (vsi->netdev && !ice_is_reset_in_progress(pf->state) && + (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) { unregister_netdev(vsi->netdev); - ice_devlink_destroy_port(vsi); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } + ice_devlink_destroy_port(vsi); + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); @@ -2770,7 +2817,24 @@ int ice_vsi_release(struct ice_vsi *vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. */ - if (vsi->type != ICE_VSI_VF) { + if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { + struct ice_vf *vf; + int i; + + ice_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) + break; + } + if (i == pf->num_alloc_vfs) { + /* No other VFs left that have control VSI, reclaim SW + * interrupts back to the common pool + */ + ice_free_res(pf->irq_tracker, vsi->base_vector, + ICE_RES_VF_CTRL_VEC_ID); + pf->num_avail_sw_msix += vsi->num_q_vectors; + } + } else if (vsi->type != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; @@ -2794,10 +2858,16 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_vsi_delete(vsi); ice_vsi_free_q_vectors(vsi); - /* make sure unregister_netdev() was called by checking __ICE_DOWN */ - if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) { - free_netdev(vsi->netdev); - vsi->netdev = NULL; + if (vsi->netdev) { + if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + } + if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + } } if (vsi->type == ICE_VSI_VF && @@ -2818,39 +2888,6 @@ int ice_vsi_release(struct ice_vsi *vsi) } /** - * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector - * @q_vector: pointer to q_vector which is being updated - * @coalesce: pointer to array of struct with stored coalesce - * - * Set coalesce param in q_vector and update these parameters in HW. 
- */ -static void -ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector, - struct ice_coalesce_stored *coalesce) -{ - struct ice_ring_container *rx_rc = &q_vector->rx; - struct ice_ring_container *tx_rc = &q_vector->tx; - struct ice_hw *hw = &q_vector->vsi->back->hw; - - tx_rc->itr_setting = coalesce->itr_tx; - rx_rc->itr_setting = coalesce->itr_rx; - - /* dynamic ITR values will be updated during Tx/Rx */ - if (!ITR_IS_DYNAMIC(tx_rc->itr_setting)) - wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(tx_rc->itr_setting) >> - ICE_ITR_GRAN_S); - if (!ITR_IS_DYNAMIC(rx_rc->itr_setting)) - wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rx_rc->itr_setting) >> - ICE_ITR_GRAN_S); - - q_vector->intrl = coalesce->intrl; - wr32(hw, GLINT_RATE(q_vector->reg_idx), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); -} - -/** * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors * @vsi: VSI connected with q_vectors * @coalesce: array of struct with stored coalesce @@ -2869,6 +2906,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, coalesce[i].itr_tx = q_vector->tx.itr_setting; coalesce[i].itr_rx = q_vector->rx.itr_setting; coalesce[i].intrl = q_vector->intrl; + + if (i < vsi->num_txq) + coalesce[i].tx_valid = true; + if (i < vsi->num_rxq) + coalesce[i].rx_valid = true; } return vsi->num_q_vectors; @@ -2888,22 +2930,75 @@ static void ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, struct ice_coalesce_stored *coalesce, int size) { + struct ice_ring_container *rc; int i; if ((size && !coalesce) || !vsi) return; - for (i = 0; i < size && i < vsi->num_q_vectors; i++) - ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i], - &coalesce[i]); + /* There are a couple of cases that have to be handled here: + * 1. The case where the number of queue vectors stays the same, but + * the number of Tx or Rx rings changes (the first for loop) + * 2. The case where the number of queue vectors increased (the + * second for loop) + */ + for (i = 0; i < size && i < vsi->num_q_vectors; i++) { + /* There are 2 cases to handle here and they are the same for + * both Tx and Rx: + * if the entry was valid previously (coalesce[i].[tr]x_valid + * and the loop variable is less than the number of rings + * allocated, then write the previous values + * + * if the entry was not valid previously, but the number of + * rings is less than are allocated (this means the number of + * rings increased from previously), then write out the + * values in the first element + * + * Also, always write the ITR, even if in ITR_IS_DYNAMIC + * as there is no harm because the dynamic algorithm + * will just overwrite. + */ + if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { + rc = &vsi->q_vectors[i]->rx; + rc->itr_setting = coalesce[i].itr_rx; + ice_write_itr(rc, rc->itr_setting); + } else if (i < vsi->alloc_rxq) { + rc = &vsi->q_vectors[i]->rx; + rc->itr_setting = coalesce[0].itr_rx; + ice_write_itr(rc, rc->itr_setting); + } + + if (i < vsi->alloc_txq && coalesce[i].tx_valid) { + rc = &vsi->q_vectors[i]->tx; + rc->itr_setting = coalesce[i].itr_tx; + ice_write_itr(rc, rc->itr_setting); + } else if (i < vsi->alloc_txq) { + rc = &vsi->q_vectors[i]->tx; + rc->itr_setting = coalesce[0].itr_tx; + ice_write_itr(rc, rc->itr_setting); + } + + vsi->q_vectors[i]->intrl = coalesce[i].intrl; + ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl); + } - /* number of q_vectors increased, so assume coalesce settings were - * changed globally (i.e. 
ethtool -C eth0 instead of per-queue) and use - * the previous settings from q_vector 0 for all of the new q_vectors + /* the number of queue vectors increased so write whatever is in + * the first element */ - for (; i < vsi->num_q_vectors; i++) - ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i], - &coalesce[0]); + for (; i < vsi->num_q_vectors; i++) { + /* transmit */ + rc = &vsi->q_vectors[i]->tx; + rc->itr_setting = coalesce[0].itr_tx; + ice_write_itr(rc, rc->itr_setting); + + /* receive */ + rc = &vsi->q_vectors[i]->rx; + rc->itr_setting = coalesce[0].itr_rx; + ice_write_itr(rc, rc->itr_setting); + + vsi->q_vectors[i]->intrl = coalesce[0].intrl; + ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl); + } } /** @@ -2919,6 +3014,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) struct ice_coalesce_stored *coalesce; int prev_num_q_vectors = 0; struct ice_vf *vf = NULL; + enum ice_vsi_type vtype; enum ice_status status; struct ice_pf *pf; int ret, i; @@ -2927,14 +3023,17 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) return -EINVAL; pf = vsi->back; - if (vsi->type == ICE_VSI_VF) + vtype = vsi->type; + if (vtype == ICE_VSI_VF) vf = &pf->vf[vsi->vf_id]; coalesce = kcalloc(vsi->num_q_vectors, sizeof(struct ice_coalesce_stored), GFP_KERNEL); - if (coalesce) - prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, - coalesce); + if (!coalesce) + return -ENOMEM; + + prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_free_q_vectors(vsi); @@ -2943,7 +3042,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. */ - if (vsi->type != ICE_VSI_VF) { + if (vtype != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; @@ -2958,7 +3057,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ice_vsi_put_qs(vsi); ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); - if (vsi->type == ICE_VSI_VF) + if (vtype == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf->vf_id); else ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); @@ -2977,7 +3076,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) if (ret < 0) goto err_vsi; - switch (vsi->type) { + switch (vtype) { case ICE_VSI_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); @@ -3004,7 +3103,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) goto err_vectors; } /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ - if (vsi->type != ICE_VSI_CTRL) + if (vtype != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at * least receive traffic on first queue. 
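A condensed view of the coalesce preservation done by the rebuild flow above may help: the settings are snapshotted before the queue vectors are torn down and replayed afterwards, with the tx_valid/rx_valid flags deciding whether a per-queue value or the entry-0 fallback is written back. This is only an illustrative sketch built from the helpers and fields shown in this hunk, not the exact driver code:

	static int example_rebuild_preserving_coalesce(struct ice_vsi *vsi)
	{
		struct ice_coalesce_stored *coalesce;
		int prev_num_q_vectors;

		/* snapshot ITR/INTRL settings before the q_vectors go away */
		coalesce = kcalloc(vsi->num_q_vectors, sizeof(*coalesce), GFP_KERNEL);
		if (!coalesce)
			return -ENOMEM;
		prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

		/* ... free q_vectors, rebuild rings and vectors here ... */

		/* replay: valid per-queue entries win, new queues inherit entry 0 */
		ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
		kfree(coalesce);
		return 0;
	}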
Hence no * need to capture return value @@ -3066,7 +3165,7 @@ err_rings: } err_vsi: ice_vsi_clear(vsi); - set_bit(__ICE_RESET_FAILED, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); kfree(coalesce); return ret; } @@ -3077,10 +3176,10 @@ err_vsi: */ bool ice_is_reset_in_progress(unsigned long *state) { - return test_bit(__ICE_RESET_OICR_RECV, state) || - test_bit(__ICE_PFR_REQ, state) || - test_bit(__ICE_CORER_REQ, state) || - test_bit(__ICE_GLOBR_REQ, state); + return test_bit(ICE_RESET_OICR_RECV, state) || + test_bit(ICE_PFR_REQ, state) || + test_bit(ICE_CORER_REQ, state) || + test_bit(ICE_GLOBR_REQ, state); } #ifdef CONFIG_DCB @@ -3168,20 +3267,15 @@ out: /** * ice_update_ring_stats - Update ring statistics * @ring: ring to update - * @cont: used to increment per-vector counters * @pkts: number of processed packets * @bytes: number of processed bytes * * This function assumes that caller has acquired a u64_stats_sync lock. */ -static void -ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont, - u64 pkts, u64 bytes) +static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) { ring->stats.bytes += bytes; ring->stats.pkts += pkts; - cont->total_bytes += bytes; - cont->total_pkts += pkts; } /** @@ -3193,7 +3287,7 @@ ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont, void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) { u64_stats_update_begin(&tx_ring->syncp); - ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes); + ice_update_ring_stats(tx_ring, pkts, bytes); u64_stats_update_end(&tx_ring->syncp); } @@ -3206,7 +3300,7 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) { u64_stats_update_begin(&rx_ring->syncp); - ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes); + ice_update_ring_stats(rx_ring, pkts, bytes); u64_stats_update_end(&rx_ring->syncp); } @@ -3348,3 +3442,40 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) return 0; } + +/** + * ice_set_link - turn on/off physical link + * @vsi: VSI to modify physical link on + * @ena: turn on/off physical link + */ +int ice_set_link(struct ice_vsi *vsi, bool ena) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_port_info *pi = vsi->port_info; + struct ice_hw *hw = pi->hw; + enum ice_status status; + + if (vsi->type != ICE_VSI_PF) + return -EINVAL; + + status = ice_aq_set_link_restart_an(pi, ena, NULL); + + /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE. + * this is not a fatal error, so print a warning message and return + * a success code. Return an error if FW returns an error code other + * than ICE_AQ_RC_EMODE + */ + if (status == ICE_ERR_AQ_ERROR) { + if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) + dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + } else if (status) { + dev_err(dev, "can't set link to %s, err %s aq_err %s\n", + (ena ? 
"ON" : "OFF"), ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 3da17895a2b1..511c2316c40c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -45,6 +45,8 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +int ice_set_link(struct ice_vsi *vsi, bool ena); + #ifdef CONFIG_DCB int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); #endif /* CONFIG_DCB */ @@ -83,7 +85,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi); void ice_vsi_free_tx_rings(struct ice_vsi *vsi); -int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); +void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); @@ -93,7 +95,8 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); int ice_status_to_errno(enum ice_status err); -u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); +void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); +void ice_write_itr(struct ice_ring_container *rc, u16 itr); enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d821c687f239..4ee85a217c6f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -84,7 +84,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) break; } - if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) return; if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) @@ -140,21 +140,10 @@ static int ice_init_mac_fltr(struct ice_pf *pf) perm_addr = vsi->port_info->mac.perm_addr; status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); - if (!status) - return 0; - - /* We aren't useful with no MAC filters, so unregister if we - * had an error - */ - if (vsi->netdev->reg_state == NETREG_REGISTERED) { - dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. 
Unregistering device\n", - ice_stat_str(status)); - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } + if (status) + return -EIO; - return -EIO; + return 0; } /** @@ -209,9 +198,9 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) */ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) { - return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) || - test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) || - test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || + test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || + test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); } /** @@ -268,7 +257,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) if (!vsi->netdev) return -EINVAL; - while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) usleep_range(1000, 2000); changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; @@ -278,9 +267,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) INIT_LIST_HEAD(&vsi->tmp_unsync_list); if (ice_vsi_fltr_changed(vsi)) { - clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); - clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); - clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); + clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); + clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); /* grab the netdev's addr_list_lock */ netif_addr_lock_bh(netdev); @@ -318,7 +307,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) * space reserved for promiscuous filters. */ if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && - !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, + !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC, vsi->state)) { promisc_forced_on = true; netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", @@ -361,8 +350,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || - test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { - clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); + test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { + clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); if (vsi->current_netdev_flags & IFF_PROMISC) { /* Apply Rx filter rule to get traffic from wire */ if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { @@ -395,14 +384,14 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) goto exit; out_promisc: - set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); + set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); goto exit; out: /* if something went wrong then set the changed flag so we try again */ - set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); - set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); + set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); + set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); exit: - clear_bit(__ICE_CFG_BUSY, vsi->state); + clear_bit(ICE_CFG_BUSY, vsi->state); return err; } @@ -447,7 +436,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++) pf->vf_agg_node[node].num_vsis = 0; - } /** @@ -463,7 +451,7 @@ ice_prepare_for_reset(struct ice_pf *pf) unsigned int i; /* already prepared for reset */ - if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; /* Notify VFs of impending reset */ @@ -484,7 +472,7 @@ ice_prepare_for_reset(struct ice_pf *pf) ice_shutdown_all_ctrlq(hw); - 
set_bit(__ICE_PREPARED_FOR_RESET, pf->state); + set_bit(ICE_PREPARED_FOR_RESET, pf->state); } /** @@ -505,12 +493,12 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) /* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); - set_bit(__ICE_RESET_FAILED, pf->state); - clear_bit(__ICE_RESET_OICR_RECV, pf->state); - clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); - clear_bit(__ICE_PFR_REQ, pf->state); - clear_bit(__ICE_CORER_REQ, pf->state); - clear_bit(__ICE_GLOBR_REQ, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); + clear_bit(ICE_RESET_OICR_RECV, pf->state); + clear_bit(ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(ICE_PFR_REQ, pf->state); + clear_bit(ICE_CORER_REQ, pf->state); + clear_bit(ICE_GLOBR_REQ, pf->state); return; } @@ -521,8 +509,8 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf, reset_type); - clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); - clear_bit(__ICE_PFR_REQ, pf->state); + clear_bit(ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(ICE_PFR_REQ, pf->state); ice_reset_all_vfs(pf, true); } } @@ -538,20 +526,20 @@ static void ice_reset_subtask(struct ice_pf *pf) /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an * OICR interrupt. The OICR handler (ice_misc_intr) determines what type * of reset is pending and sets bits in pf->state indicating the reset - * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set + * type and ICE_RESET_OICR_RECV. So, if the latter bit is set * prepare for pending reset if not already (for PF software-initiated * global resets the software should already be prepared for it as - * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated + * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated * by firmware or software on other PFs, that bit is not set so prepare * for the reset now), poll for reset done, rebuild and return. */ - if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { + if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { /* Perform the largest reset requested */ - if (test_and_clear_bit(__ICE_CORER_RECV, pf->state)) + if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) reset_type = ICE_RESET_CORER; - if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) + if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) reset_type = ICE_RESET_GLOBR; - if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state)) + if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) reset_type = ICE_RESET_EMPR; /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) @@ -560,7 +548,7 @@ static void ice_reset_subtask(struct ice_pf *pf) /* make sure we are ready to rebuild */ if (ice_check_reset(&pf->hw)) { - set_bit(__ICE_RESET_FAILED, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); } else { /* done with reset. 
start rebuild */ pf->hw.reset_ongoing = false; @@ -568,11 +556,11 @@ static void ice_reset_subtask(struct ice_pf *pf) /* clear bit to resume normal operations, but * ICE_NEEDS_RESTART bit is set in case rebuild failed */ - clear_bit(__ICE_RESET_OICR_RECV, pf->state); - clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); - clear_bit(__ICE_PFR_REQ, pf->state); - clear_bit(__ICE_CORER_REQ, pf->state); - clear_bit(__ICE_GLOBR_REQ, pf->state); + clear_bit(ICE_RESET_OICR_RECV, pf->state); + clear_bit(ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(ICE_PFR_REQ, pf->state); + clear_bit(ICE_CORER_REQ, pf->state); + clear_bit(ICE_GLOBR_REQ, pf->state); ice_reset_all_vfs(pf, true); } @@ -580,19 +568,19 @@ static void ice_reset_subtask(struct ice_pf *pf) } /* No pending resets to finish processing. Check for new resets */ - if (test_bit(__ICE_PFR_REQ, pf->state)) + if (test_bit(ICE_PFR_REQ, pf->state)) reset_type = ICE_RESET_PFR; - if (test_bit(__ICE_CORER_REQ, pf->state)) + if (test_bit(ICE_CORER_REQ, pf->state)) reset_type = ICE_RESET_CORER; - if (test_bit(__ICE_GLOBR_REQ, pf->state)) + if (test_bit(ICE_GLOBR_REQ, pf->state)) reset_type = ICE_RESET_GLOBR; /* If no valid reset type requested just return */ if (reset_type == ICE_RESET_INVAL) return; /* reset if not already down or busy */ - if (!test_bit(__ICE_DOWN, pf->state) && - !test_bit(__ICE_CFG_BUSY, pf->state)) { + if (!test_bit(ICE_DOWN, pf->state) && + !test_bit(ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } } @@ -609,7 +597,7 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) case ICE_AQ_LINK_TOPO_UNREACH_PRT: case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: - netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); + netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); break; case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); @@ -731,7 +719,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_SW_CFG, caps, NULL); + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); if (status) netdev_info(vsi->netdev, "Get phy capability failed.\n"); @@ -764,7 +752,7 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) if (!vsi) return; - if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev) + if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) return; if (vsi->type == ICE_VSI_PF) { @@ -884,10 +872,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, { struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; + enum ice_status status; struct ice_vsi *vsi; u16 old_link_speed; bool old_link; - int result; phy_info = &pi->phy; phy_info->link_info_old = phy_info->link_info; @@ -898,10 +886,11 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, /* update the link info structures and re-enable link events, * don't bail on failure due to other book keeping needed */ - result = ice_update_link_info(pi); - if (result) - dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", - pi->lport); + status = ice_update_link_info(pi); + if (status) + dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", + pi->lport, ice_stat_str(status), + ice_aq_str(pi->hw->adminq.sq_last_status)); /* Check if the link state is up after updating link info, and treat * this event as an UP event since the link is actually UP now. @@ -917,18 +906,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { set_bit(ICE_FLAG_NO_MEDIA, pf->flags); - - result = ice_aq_set_link_restart_an(pi, false, NULL); - if (result) { - dev_dbg(dev, "Failed to set link down, VSI %d error %d\n", - vsi->vsi_num, result); - return result; - } + ice_set_link(vsi, false); } /* if the old link up/down and speed is the same as the new */ if (link_up == old_link && link_speed == old_link_speed) - return result; + return 0; if (ice_is_dcb_active(pf)) { if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) @@ -942,7 +925,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, ice_vc_notify_link_state(pf); - return result; + return 0; } /** @@ -954,8 +937,8 @@ static void ice_watchdog_subtask(struct ice_pf *pf) int i; /* if interface is down do nothing */ - if (test_bit(__ICE_DOWN, pf->state) || - test_bit(__ICE_CFG_BUSY, pf->state)) + if (test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_CFG_BUSY, pf->state)) return; /* make sure we don't do these things too often */ @@ -1044,7 +1027,7 @@ struct ice_aq_task { }; /** - * ice_wait_for_aq_event - Wait for an AdminQ event from firmware + * ice_aq_wait_for_event - Wait for an AdminQ event from firmware * @pf: pointer to the PF private structure * @opcode: the opcode to wait for * @timeout: how long to wait, in jiffies @@ -1199,7 +1182,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) u32 oldval, val; /* Do not clean control queue if/when PF reset fails */ - if (test_bit(__ICE_RESET_FAILED, pf->state)) + if (test_bit(ICE_RESET_FAILED, pf->state)) return 0; switch (q_type) { @@ -1210,6 +1193,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) case ICE_CTL_Q_MAILBOX: 
cq = &hw->mailboxq; qtype = "Mailbox"; + /* we are going to try to detect a malicious VF, so set the + * state to begin detection + */ + hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; break; default: dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); @@ -1291,7 +1278,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_vf_lan_overflow_event(pf, &event); break; case ice_mbx_opc_send_msg_to_pf: - ice_vc_process_vf_msg(pf, &event); + if (!ice_is_malicious_vf(pf, &event, i, pending)) + ice_vc_process_vf_msg(pf, &event); break; case ice_aqc_opc_fw_logging: ice_output_fw_log(hw, &event.desc, event.msg_buf); @@ -1334,13 +1322,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) + if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) return; if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) return; - clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); /* There might be a situation where new messages arrive to a control * queue between processing the last message and clearing the @@ -1361,13 +1349,13 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) + if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) return; if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) return; - clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); if (ice_ctrlq_pending(hw, &hw->mailboxq)) __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); @@ -1383,9 +1371,9 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf) */ void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_SERVICE_DIS, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && - !test_bit(__ICE_NEEDS_RESTART, pf->state)) + if (!test_bit(ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && + !test_bit(ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); } @@ -1395,32 +1383,32 @@ void ice_service_task_schedule(struct ice_pf *pf) */ static void ice_service_task_complete(struct ice_pf *pf) { - WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state)); + WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); /* force memory (pf->state) to sync before next service task */ smp_mb__before_atomic(); - clear_bit(__ICE_SERVICE_SCHED, pf->state); + clear_bit(ICE_SERVICE_SCHED, pf->state); } /** * ice_service_task_stop - stop service task and cancel works * @pf: board private structure * - * Return 0 if the __ICE_SERVICE_DIS bit was not already set, + * Return 0 if the ICE_SERVICE_DIS bit was not already set, * 1 otherwise. 
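The return-value contract spelled out in the comment above matters because several callers stop the service task without knowing whether someone else already disabled it; only the caller that actually performed the disable should restart it. A minimal sketch of that pattern, matching the way the suspend path later in this patch uses it:

	static void example_quiesce_service_task(struct ice_pf *pf)
	{
		int already_disabled;

		already_disabled = ice_service_task_stop(pf);

		/* ... work that must not race with the service task ... */

		/* restart only if this caller was the one that disabled it */
		if (!already_disabled)
			ice_service_task_restart(pf);
	}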
*/ static int ice_service_task_stop(struct ice_pf *pf) { int ret; - ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); + ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); if (pf->serv_tmr.function) del_timer_sync(&pf->serv_tmr); if (pf->serv_task.func) cancel_work_sync(&pf->serv_task); - clear_bit(__ICE_SERVICE_SCHED, pf->state); + clear_bit(ICE_SERVICE_SCHED, pf->state); return ret; } @@ -1432,7 +1420,7 @@ static int ice_service_task_stop(struct ice_pf *pf) */ static void ice_service_task_restart(struct ice_pf *pf) { - clear_bit(__ICE_SERVICE_DIS, pf->state); + clear_bit(ICE_SERVICE_DIS, pf->state); ice_service_task_schedule(pf); } @@ -1465,7 +1453,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) unsigned int i; u32 reg; - if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { + if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { /* Since the VF MDD event logging is rate limited, check if * there are pending MDD events. */ @@ -1557,7 +1545,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); vf->mdd_tx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", i); @@ -1567,7 +1555,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_TCLAN_VALID_M) { wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); vf->mdd_tx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", i); @@ -1577,7 +1565,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_TDPU_VALID_M) { wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); vf->mdd_tx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", i); @@ -1587,7 +1575,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_RX_VALID_M) { wr32(hw, VP_MDET_RX(i), 0xFFFF); vf->mdd_rx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", i); @@ -1642,7 +1630,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) if (!pcaps) return -ENOMEM; - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (retcode) { dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", @@ -1702,7 +1690,7 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, NULL); if (status) { @@ -1748,15 +1736,18 @@ static void ice_init_link_dflt_override(struct ice_port_info *pi) * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings * @pi: port info structure * - * If default override is enabled, initialized the user PHY cfg speed and FEC + * If default override is enabled, initialize the user PHY cfg speed and FEC * settings using the default override mask from the NVM. 
* * The PHY should only be configured with the default override settings the - * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state + * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state * is used to indicate that the user PHY cfg default override is initialized * and the PHY has not been configured with the default override settings. The * state is set here, and cleared in ice_configure_phy the first time the PHY is * configured. + * + * This function should be called only if the FW doesn't support default + * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. */ static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) { @@ -1781,7 +1772,7 @@ static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) cfg->link_fec_opt = ldo->fec_options; phy->curr_user_fec_req = ICE_FEC_AUTO; - set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); + set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); } /** @@ -1804,22 +1795,21 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = pi->hw->back; enum ice_status status; - struct ice_vsi *vsi; int err = 0; if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) return -EIO; - vsi = ice_get_main_vsi(pf); - if (!vsi) - return -EINVAL; - pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, - NULL); + if (ice_fw_supports_report_dflt_cfg(pi->hw)) + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); + else + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); if (status) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); err = -EIO; @@ -1829,22 +1819,24 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); /* check if lenient mode is supported and enabled */ - if (ice_fw_supports_link_override(&vsi->back->hw) && + if (ice_fw_supports_link_override(pi->hw) && !(pcaps->module_compliance_enforcement & ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); - /* if link default override is enabled, initialize user PHY - * configuration with link default override values + /* if the FW supports default PHY configuration mode, then the driver + * does not have to apply link override settings. 
If not, + * initialize user PHY configuration with link override values */ - if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) { + if (!ice_fw_supports_report_dflt_cfg(pi->hw) && + (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { ice_init_phy_cfg_dflt_override(pi); goto out; } } - /* if link default override is not enabled, initialize PHY using - * topology with media + /* if link default override is not enabled, set user flow control and + * FEC settings based on what get_phy_caps returned */ phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options); @@ -1852,7 +1844,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) out: phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; - set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); + set_bit(ICE_PHY_INIT_COMPLETE, pf->state); err_out: kfree(pcaps); return err; @@ -1869,27 +1861,24 @@ err_out: static int ice_configure_phy(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_port_info *pi = vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; struct ice_aqc_set_phy_cfg_data *cfg; - struct ice_port_info *pi; + struct ice_phy_info *phy = &pi->phy; + struct ice_pf *pf = vsi->back; enum ice_status status; int err = 0; - pi = vsi->port_info; - if (!pi) - return -EINVAL; - /* Ensure we have media as we cannot configure a medialess port */ - if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) + if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) return -EPERM; ice_print_topo_conflict(vsi); - if (vsi->port_info->phy.link_info.topo_media_conflict == - ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) + if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) return -EPERM; - if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) return ice_force_phys_link_state(vsi, true); pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); @@ -1897,7 +1886,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) { dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", @@ -1910,15 +1899,19 @@ static int ice_configure_phy(struct ice_vsi *vsi) * there's nothing to do */ if (pcaps->caps & ICE_AQC_PHY_EN_LINK && - ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg)) + ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) goto done; /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, - NULL); + if (ice_fw_supports_report_dflt_cfg(pi->hw)) + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); + else + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); if (status) { - dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", + dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", vsi->vsi_num, ice_stat_str(status)); err = -EIO; goto done; @@ -1935,10 +1928,10 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Speed - If default override pending, use curr_user_phy_cfg set in * ice_init_phy_user_cfg_ldo. 
*/ - if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, + if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, vsi->back->state)) { - cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low; - cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high; + cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; + cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; } else { u64 phy_low = 0, phy_high = 0; @@ -1956,7 +1949,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) } /* FEC */ - ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); /* Can't provide what was requested; use PHY capabilities */ if (cfg->link_fec_opt != @@ -1968,12 +1961,12 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Flow Control - always supported; no need to check against * capabilities */ - ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req); + ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); /* Enable link and link update */ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); + status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); if (status) { dev_err(dev, "Failed to set phy config, VSI %d error %s\n", vsi->vsi_num, ice_stat_str(status)); @@ -2014,13 +2007,13 @@ static void ice_check_media_subtask(struct ice_pf *pf) return; if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { - if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) + if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) ice_init_phy_user_cfg(pi); /* PHY settings are reset on media insertion, reconfigure * PHY to preserve settings. */ - if (test_bit(__ICE_DOWN, vsi->state) && + if (test_bit(ICE_VSI_DOWN, vsi->state) && test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) return; @@ -2050,8 +2043,8 @@ static void ice_service_task(struct work_struct *work) /* bail if a reset/recovery cycle is pending or rebuild failed */ if (ice_is_reset_in_progress(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state) || - test_bit(__ICE_NEEDS_RESTART, pf->state)) { + test_bit(ICE_SUSPENDED, pf->state) || + test_bit(ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; } @@ -2071,7 +2064,9 @@ static void ice_service_task(struct work_struct *work) ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); ice_sync_arfs_fltrs(pf); - /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ + ice_flush_fdir_ctx(pf); + + /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); /* If the tasks have taken longer than one service timer period @@ -2079,10 +2074,11 @@ static void ice_service_task(struct work_struct *work) * schedule the service task now. 
*/ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || - test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || - test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || - test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || - test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) + test_bit(ICE_MDD_EVENT_PENDING, pf->state) || + test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || + test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || + test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -2112,7 +2108,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) struct device *dev = ice_pf_to_dev(pf); /* bail out if earlier reset has failed */ - if (test_bit(__ICE_RESET_FAILED, pf->state)) { + if (test_bit(ICE_RESET_FAILED, pf->state)) { dev_dbg(dev, "earlier reset has failed\n"); return -EIO; } @@ -2124,13 +2120,13 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) switch (reset) { case ICE_RESET_PFR: - set_bit(__ICE_PFR_REQ, pf->state); + set_bit(ICE_PFR_REQ, pf->state); break; case ICE_RESET_CORER: - set_bit(__ICE_CORER_REQ, pf->state); + set_bit(ICE_CORER_REQ, pf->state); break; case ICE_RESET_GLOBR: - set_bit(__ICE_GLOBR_REQ, pf->state); + set_bit(ICE_GLOBR_REQ, pf->state); break; default: return -EINVAL; @@ -2220,8 +2216,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, - q_vector->name, q_vector); + if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + IRQF_SHARED, q_vector->name, + q_vector); + else + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + 0, q_vector->name, q_vector); if (err) { netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", err); @@ -2524,7 +2525,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } /* need to stop netdev while setting up the program for Rx rings */ - if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { + if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { ret = ice_down(vsi); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); @@ -2630,8 +2631,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) u32 oicr, ena_mask; dev = ice_pf_to_dev(pf); - set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); - set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); + set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); @@ -2643,18 +2644,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_MAL_DETECT_M) { ena_mask &= ~PFINT_OICR_MAL_DETECT_M; - set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + set_bit(ICE_MDD_EVENT_PENDING, pf->state); } if (oicr & PFINT_OICR_VFLR_M) { /* disable any further VFLR event notifications */ - if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { u32 reg = rd32(hw, PFINT_OICR_ENA); reg &= ~PFINT_OICR_VFLR_M; wr32(hw, PFINT_OICR_ENA, reg); } else { ena_mask &= ~PFINT_OICR_VFLR_M; - set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + set_bit(ICE_VFLR_EVENT_PENDING, pf->state); } } @@ -2680,13 +2681,13 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) * We also make note of which reset happened so that peer * devices/drivers can be 
informed. */ - if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) { + if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { if (reset == ICE_RESET_CORER) - set_bit(__ICE_CORER_RECV, pf->state); + set_bit(ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) - set_bit(__ICE_GLOBR_RECV, pf->state); + set_bit(ICE_GLOBR_RECV, pf->state); else - set_bit(__ICE_EMPR_RECV, pf->state); + set_bit(ICE_EMPR_RECV, pf->state); /* There are couple of different bits at play here. * hw->reset_ongoing indicates whether the hardware is @@ -2694,7 +2695,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) * is received and set back to false after the driver * has determined that the hardware is out of reset. * - * __ICE_RESET_OICR_RECV in pf->state indicates + * ICE_RESET_OICR_RECV in pf->state indicates * that a post reset rebuild is required before the * driver is operational again. This is set above. * @@ -2722,7 +2723,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_ECC_ERR_M)) { - set_bit(__ICE_PFR_REQ, pf->state); + set_bit(ICE_PFR_REQ, pf->state); ice_service_task_schedule(pf); } } @@ -2975,19 +2976,13 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) struct ice_netdev_priv *np; struct net_device *netdev; u8 mac_addr[ETH_ALEN]; - int err; - - err = ice_devlink_create_port(vsi); - if (err) - return err; netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, vsi->alloc_rxq); - if (!netdev) { - err = -ENOMEM; - goto err_destroy_devlink_port; - } + if (!netdev) + return -ENOMEM; + set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); vsi->netdev = netdev; np = netdev_priv(netdev); np->vsi = vsi; @@ -3014,25 +3009,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = ICE_MAX_MTU; - err = register_netdev(vsi->netdev); - if (err) - goto err_free_netdev; - - devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); - - netif_carrier_off(vsi->netdev); - - /* make sure transmit queues start off as stopped */ - netif_tx_stop_all_queues(vsi->netdev); - return 0; - -err_free_netdev: - free_netdev(vsi->netdev); - vsi->netdev = NULL; -err_destroy_devlink_port: - ice_devlink_destroy_port(vsi); - return err; } /** @@ -3107,15 +3084,6 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, struct ice_vsi *vsi = np->vsi; int ret; - if (vid >= VLAN_N_VID) { - netdev_err(netdev, "VLAN id requested %d is out of range %d\n", - vid, VLAN_N_VID); - return -EINVAL; - } - - if (vsi->info.pvid) - return -EINVAL; - /* VLAN 0 is added by default during load/reset */ if (!vid) return 0; @@ -3132,7 +3100,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, */ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); if (!ret) - set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); return ret; } @@ -3153,9 +3121,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, struct ice_vsi *vsi = np->vsi; int ret; - if (vsi->info.pvid) - return -EINVAL; - /* don't allow removal of VLAN 0 */ if (!vid) return 0; @@ -3171,7 +3136,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) ret = ice_cfg_vlan_pruning(vsi, false, false); - set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); return ret; } @@ -3230,8 
+3195,7 @@ unroll_napi_add: if (vsi) { ice_napi_del(vsi); if (vsi->netdev) { - if (vsi->netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); free_netdev(vsi->netdev); vsi->netdev = NULL; } @@ -3365,7 +3329,7 @@ static int ice_init_pf(struct ice_pf *pf) timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; INIT_WORK(&pf->serv_task, ice_service_task); - clear_bit(__ICE_SERVICE_SCHED, pf->state); + clear_bit(ICE_SERVICE_SCHED, pf->state); mutex_init(&pf->avail_q_mutex); pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); @@ -3574,7 +3538,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) if (!new_rx && !new_tx) return -EINVAL; - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; @@ -3598,7 +3562,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) ice_pf_dcb_recfg(pf); ice_vsi_open(vsi); done: - clear_bit(__ICE_CFG_BUSY, pf->state); + clear_bit(ICE_CFG_BUSY, pf->state); return err; } @@ -3985,6 +3949,43 @@ static void ice_print_wake_reason(struct ice_pf *pf) } /** + * ice_register_netdev - register netdev and devlink port + * @pf: pointer to the PF struct + */ +static int ice_register_netdev(struct ice_pf *pf) +{ + struct ice_vsi *vsi; + int err = 0; + + vsi = ice_get_main_vsi(pf); + if (!vsi || !vsi->netdev) + return -EIO; + + err = register_netdev(vsi->netdev); + if (err) + goto err_register_netdev; + + set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + netif_carrier_off(vsi->netdev); + netif_tx_stop_all_queues(vsi->netdev); + err = ice_devlink_create_port(vsi); + if (err) + goto err_devlink_create; + + devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); + + return 0; +err_devlink_create: + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); +err_register_netdev: + free_netdev(vsi->netdev); + vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + return err; +} + +/** * ice_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in ice_pci_tbl @@ -4006,7 +4007,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) return err; - err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); + err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); if (err) { dev_err(dev, "BAR0 I/O map error %d\n", err); return err; @@ -4030,9 +4031,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->pdev = pdev; pci_set_drvdata(pdev, pf); - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_DOWN, pf->state); /* Disable service task until DOWN bit is cleared */ - set_bit(__ICE_SERVICE_DIS, pf->state); + set_bit(ICE_SERVICE_DIS, pf->state); hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@ -4172,7 +4173,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_alloc_sw_unroll; } - clear_bit(__ICE_SERVICE_DIS, pf->state); + clear_bit(ICE_SERVICE_DIS, pf->state); /* tell the firmware we are up */ err = ice_send_version(pf); @@ -4261,15 +4262,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pcie_print_link_status(pf->pdev); probe_done: + err = ice_register_netdev(pf); + if (err) + goto err_netdev_reg; + /* ready to go, so clear down state bit */ - clear_bit(__ICE_DOWN, pf->state); + clear_bit(ICE_DOWN, pf->state); return 
0; +err_netdev_reg: err_send_version_unroll: ice_vsi_release_all(pf); err_alloc_sw_unroll: - set_bit(__ICE_SERVICE_DIS, pf->state); - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_SERVICE_DIS, pf->state); + set_bit(ICE_DOWN, pf->state); devm_kfree(dev, pf->first_sw); err_msix_misc_unroll: ice_free_irq_msix_misc(pf); @@ -4310,7 +4316,7 @@ static void ice_set_wake(struct ice_pf *pf) } /** - * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet + * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet * @pf: pointer to the PF struct * * Issue firmware command to enable multicast magic wake, making @@ -4369,11 +4375,11 @@ static void ice_remove(struct pci_dev *pdev) } if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { - set_bit(__ICE_VF_RESETS_DISABLED, pf->state); + set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_DOWN, pf->state); ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); @@ -4533,13 +4539,13 @@ static int __maybe_unused ice_suspend(struct device *dev) disabled = ice_service_task_stop(pf); /* Already suspended?, then there is nothing to do */ - if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) { + if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { if (!disabled) ice_service_task_restart(pf); return 0; } - if (test_bit(__ICE_DOWN, pf->state) || + if (test_bit(ICE_DOWN, pf->state) || ice_is_reset_in_progress(pf->state)) { dev_err(dev, "can't suspend device in reset or already down\n"); if (!disabled) @@ -4611,16 +4617,16 @@ static int __maybe_unused ice_resume(struct device *dev) if (ret) dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); - clear_bit(__ICE_DOWN, pf->state); + clear_bit(ICE_DOWN, pf->state); /* Now perform PF reset and rebuild */ reset_type = ICE_RESET_PFR; /* re-enable service task for reset, but allow reset to schedule it */ - clear_bit(__ICE_SERVICE_DIS, pf->state); + clear_bit(ICE_SERVICE_DIS, pf->state); if (ice_schedule_reset(pf, reset_type)) dev_err(dev, "Reset during resume failed.\n"); - clear_bit(__ICE_SUSPENDED, pf->state); + clear_bit(ICE_SUSPENDED, pf->state); ice_service_task_restart(pf); /* Restart the service task */ @@ -4649,11 +4655,11 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) return PCI_ERS_RESULT_DISCONNECT; } - if (!test_bit(__ICE_SUSPENDED, pf->state)) { + if (!test_bit(ICE_SUSPENDED, pf->state)) { ice_service_task_stop(pf); - if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { - set_bit(__ICE_PFR_REQ, pf->state); + if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(ICE_PFR_REQ, pf->state); ice_prepare_for_reset(pf); } } @@ -4720,7 +4726,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } - if (test_bit(__ICE_SUSPENDED, pf->state)) { + if (test_bit(ICE_SUSPENDED, pf->state)) { dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", __func__); return; @@ -4741,11 +4747,11 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - if (!test_bit(__ICE_SUSPENDED, pf->state)) { + if (!test_bit(ICE_SUSPENDED, pf->state)) { ice_service_task_stop(pf); - if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { - set_bit(__ICE_PFR_REQ, pf->state); + if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(ICE_PFR_REQ, pf->state); ice_prepare_for_reset(pf); } } @@ -4892,7 +4898,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return 0; } - if (test_bit(__ICE_DOWN, pf->state) || + if (test_bit(ICE_DOWN, 
pf->state) || ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't set mac %pM. device not ready\n", mac); @@ -4961,8 +4967,8 @@ static void ice_set_rx_mode(struct net_device *netdev) * ndo_set_rx_mode may be triggered even without a change in netdev * flags */ - set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); - set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); + set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); + set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); /* schedule our worker thread which will take care of @@ -5111,10 +5117,10 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) * separate if/else statements to guarantee each feature is checked */ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) - ret = ice_vsi_manage_rss_lut(vsi, true); + ice_vsi_manage_rss_lut(vsi, true); else if (!(features & NETIF_F_RXHASH) && netdev->features & NETIF_F_RXHASH) - ret = ice_vsi_manage_rss_lut(vsi, false); + ice_vsi_manage_rss_lut(vsi, false); if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) @@ -5195,6 +5201,105 @@ int ice_vsi_cfg(struct ice_vsi *vsi) return err; } +/* THEORY OF MODERATION: + * The below code creates custom DIM profiles for use by this driver, because + * the ice driver hardware works differently than the hardware that DIMLIB was + * originally made for. ice hardware doesn't have packet count limits that + * can trigger an interrupt, but it *does* have interrupt rate limit support, + * and this code adds that capability to be used by the driver when it's using + * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver + * for how to "respond" to traffic and interrupts, so this driver uses a + * slightly different set of moderation parameters to get best performance. + */ +struct ice_dim { + /* the throttle rate for interrupts, basically worst case delay before + * an initial interrupt fires, value is stored in microseconds. + */ + u16 itr; + /* the rate limit for interrupts, which can cap a delay from a small + * ITR at a certain amount of interrupts per second. f.e. a 2us ITR + * could yield as much as 500,000 interrupts per second, but with a + * 10us rate limit, it limits to 100,000 interrupts per second. Value + * is stored in microseconds. + */ + u16 intrl; +}; + +/* Make a different profile for Rx that doesn't allow quite so aggressive + * moderation at the high end (it maxes out at 128us or about 8k interrupts a + * second. The INTRL/rate parameters here are only useful to cap small ITR + * values, which is why for larger ITR's - like 128, which can only generate + * 8k interrupts per second, there is no point to rate limit and the values + * are set to zero. The rate limit values do affect latency, and so must + * be reasonably small so to not impact latency sensitive tests. 
+ */ +static const struct ice_dim rx_profile[] = { + {2, 10}, + {8, 16}, + {32, 0}, + {96, 0}, + {128, 0} +}; + +/* The transmit profile, which has the same sorts of values + * as the previous struct + */ +static const struct ice_dim tx_profile[] = { + {2, 10}, + {8, 16}, + {64, 0}, + {128, 0}, + {256, 0} +}; + +static void ice_tx_dim_work(struct work_struct *work) +{ + struct ice_ring_container *rc; + struct ice_q_vector *q_vector; + struct dim *dim; + u16 itr, intrl; + + dim = container_of(work, struct dim, work); + rc = container_of(dim, struct ice_ring_container, dim); + q_vector = container_of(rc, struct ice_q_vector, tx); + + if (dim->profile_ix >= ARRAY_SIZE(tx_profile)) + dim->profile_ix = ARRAY_SIZE(tx_profile) - 1; + + /* look up the values in our local table */ + itr = tx_profile[dim->profile_ix].itr; + intrl = tx_profile[dim->profile_ix].intrl; + + ice_write_itr(rc, itr); + ice_write_intrl(q_vector, intrl); + + dim->state = DIM_START_MEASURE; +} + +static void ice_rx_dim_work(struct work_struct *work) +{ + struct ice_ring_container *rc; + struct ice_q_vector *q_vector; + struct dim *dim; + u16 itr, intrl; + + dim = container_of(work, struct dim, work); + rc = container_of(dim, struct ice_ring_container, dim); + q_vector = container_of(rc, struct ice_q_vector, rx); + + if (dim->profile_ix >= ARRAY_SIZE(rx_profile)) + dim->profile_ix = ARRAY_SIZE(rx_profile) - 1; + + /* look up the values in our local table */ + itr = rx_profile[dim->profile_ix].itr; + intrl = rx_profile[dim->profile_ix].intrl; + + ice_write_itr(rc, itr); + ice_write_intrl(q_vector, intrl); + + dim->state = DIM_START_MEASURE; +} + /** * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured @@ -5209,6 +5314,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work); + q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + + INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work); + q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + if (q_vector->rx.ring || q_vector->tx.ring) napi_enable(&q_vector->napi); } @@ -5235,7 +5346,7 @@ static int ice_up_complete(struct ice_vsi *vsi) if (err) return err; - clear_bit(__ICE_DOWN, vsi->state); + clear_bit(ICE_VSI_DOWN, vsi->state); ice_napi_enable_all(vsi); ice_vsi_ena_irq(vsi); @@ -5342,7 +5453,6 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi->tx_linearize = 0; vsi->rx_buf_failed = 0; vsi->rx_page_failed = 0; - vsi->rx_gro_dropped = 0; rcu_read_lock(); @@ -5357,7 +5467,6 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi_stats->rx_bytes += bytes; vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; - vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; } /* update XDP Tx rings counters */ @@ -5378,8 +5487,8 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) struct ice_eth_stats *cur_es = &vsi->eth_stats; struct ice_pf *pf = vsi->back; - if (test_bit(__ICE_DOWN, vsi->state) || - test_bit(__ICE_CFG_BUSY, pf->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state) || + test_bit(ICE_CFG_BUSY, pf->state)) return; /* get stats as recorded by Tx/Rx rings */ @@ -5389,7 +5498,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) ice_update_eth_stats(vsi); cur_ns->tx_errors = cur_es->tx_errors; - cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; + cur_ns->rx_dropped = cur_es->rx_discards; 
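Putting numbers on the moderation rationale above: the worst-case interrupt rate is roughly 1,000,000 / max(ITR, INTRL) per second, so the {2, 10} Rx entry is held to about 100,000 interrupts/s by its 10 us rate limit, while the {128, 0} entry already tops out near 8,000 interrupts/s and needs no limit. The dim work handlers only apply whatever profile index DIMLIB picks; the other half of the loop is the poll path feeding samples in. The sketch below shows that half under stated assumptions: ice_example_net_dim and the q_vector->total_events interrupt counter are illustrative names, while dim_update_sample() and net_dim() are the generic DIMLIB entry points and ring->stats are the counters maintained by ice_update_ring_stats() above.

	static void ice_example_net_dim(struct ice_q_vector *q_vector)
	{
		struct ice_ring_container *rx = &q_vector->rx;
		struct dim_sample sample = {};
		u64 pkts = 0, bytes = 0;
		struct ice_ring *ring;

		/* fold the counters of every Rx ring owned by this vector */
		for (ring = rx->ring; ring; ring = ring->next) {
			pkts += ring->stats.pkts;
			bytes += ring->stats.bytes;
		}

		/* total_events stands in for a per-vector interrupt counter */
		dim_update_sample(q_vector->total_events, pkts, bytes, &sample);
		net_dim(&rx->dim, sample);
	}

Called from the napi poll routine before re-enabling the interrupt, this lets DIMLIB compare successive samples and, when it decides to move to a different profile index, schedule the ice_rx_dim_work() handler shown above.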
cur_ns->tx_dropped = cur_es->tx_discards; cur_ns->multicast = cur_es->rx_multicast; @@ -5583,7 +5692,7 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) * But, only call the update routine and read the registers if VSI is * not down. */ - if (!test_bit(__ICE_DOWN, vsi->state)) + if (!test_bit(ICE_VSI_DOWN, vsi->state)) ice_update_vsi_ring_stats(vsi); stats->tx_packets = vsi_stats->tx_packets; stats->tx_bytes = vsi_stats->tx_bytes; @@ -5619,6 +5728,9 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) if (q_vector->rx.ring || q_vector->tx.ring) napi_disable(&q_vector->napi); + + cancel_work_sync(&q_vector->tx.dim.work); + cancel_work_sync(&q_vector->rx.dim.work); } } @@ -5631,7 +5743,7 @@ int ice_down(struct ice_vsi *vsi) int i, tx_err, rx_err, link_err = 0; /* Caller of this function is expected to set the - * vsi->state __ICE_DOWN bit + * vsi->state ICE_DOWN bit */ if (vsi->netdev) { netif_carrier_off(vsi->netdev); @@ -5783,7 +5895,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi) if (err) goto err_up_complete; - clear_bit(__ICE_DOWN, vsi->state); + clear_bit(ICE_VSI_DOWN, vsi->state); ice_vsi_ena_irq(vsi); return 0; @@ -5979,7 +6091,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) enum ice_status ret; int err; - if (test_bit(__ICE_DOWN, pf->state)) + if (test_bit(ICE_DOWN, pf->state)) goto clear_recovery; dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); @@ -6095,7 +6207,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_replay_post(hw); /* if we get here, reset flow is successful */ - clear_bit(__ICE_RESET_FAILED, pf->state); + clear_bit(ICE_RESET_FAILED, pf->state); return; err_vsi_rebuild: @@ -6103,10 +6215,10 @@ err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: ice_shutdown_all_ctrlq(hw); - set_bit(__ICE_RESET_FAILED, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); clear_recovery: /* set this bit in PF state to control service task scheduling */ - set_bit(__ICE_NEEDS_RESTART, pf->state); + set_bit(ICE_NEEDS_RESTART, pf->state); dev_err(dev, "Rebuild failed, unload and reload driver\n"); } @@ -6170,7 +6282,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { + if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { int err; err = ice_down(vsi); @@ -6305,89 +6417,118 @@ const char *ice_stat_str(enum ice_status stat_err) } /** - * ice_set_rss - Set RSS keys and lut + * ice_set_rss_lut - Set RSS LUT * @vsi: Pointer to VSI structure - * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure */ -int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; + struct ice_aq_get_set_rss_lut_params params = {}; + struct ice_hw *hw = &vsi->back->hw; enum ice_status status; - struct device *dev; - dev = ice_pf_to_dev(pf); - if (seed) { - struct ice_aqc_get_set_rss_keys *buf = - (struct ice_aqc_get_set_rss_keys *)seed; + if (!lut) + return -EINVAL; - status = ice_aq_set_rss_key(hw, vsi->idx, buf); + params.vsi_handle = vsi->idx; + params.lut_size = lut_size; + params.lut_type = vsi->rss_lut_type; + params.lut = lut; - if (status) { - dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n", - ice_stat_str(status), - 
ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + status = ice_aq_set_rss_lut(hw, ¶ms); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return -EIO; } - if (lut) { - status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, - lut, lut_size); - if (status) { - dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + return 0; +} + +/** + * ice_set_rss_key - Set RSS key + * @vsi: Pointer to the VSI structure + * @seed: RSS hash seed + * + * Returns 0 on success, negative on failure + */ +int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) +{ + struct ice_hw *hw = &vsi->back->hw; + enum ice_status status; + + if (!seed) + return -EINVAL; + + status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return -EIO; } return 0; } /** - * ice_get_rss - Get RSS keys and lut + * ice_get_rss_lut - Get RSS LUT * @vsi: Pointer to VSI structure - * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ -int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; + struct ice_aq_get_set_rss_lut_params params = {}; + struct ice_hw *hw = &vsi->back->hw; enum ice_status status; - struct device *dev; - dev = ice_pf_to_dev(pf); - if (seed) { - struct ice_aqc_get_set_rss_keys *buf = - (struct ice_aqc_get_set_rss_keys *)seed; + if (!lut) + return -EINVAL; - status = ice_aq_get_rss_key(hw, vsi->idx, buf); - if (status) { - dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + params.vsi_handle = vsi->idx; + params.lut_size = lut_size; + params.lut_type = vsi->rss_lut_type; + params.lut = lut; + + status = ice_aq_get_rss_lut(hw, ¶ms); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return -EIO; } - if (lut) { - status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, - lut, lut_size); - if (status) { - dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + return 0; +} + +/** + * ice_get_rss_key - Get RSS key + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the key in + * + * Returns 0 on success, negative on failure + */ +int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) +{ + struct ice_hw *hw = &vsi->back->hw; + enum ice_status status; + + if (!seed) + return -EINVAL; + + status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return -EIO; } return 0; @@ -6599,19 +6740,19 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) switch (pf->tx_timeout_recovery_level) { case 1: - set_bit(__ICE_PFR_REQ, pf->state); + set_bit(ICE_PFR_REQ, 
pf->state); break; case 2: - set_bit(__ICE_CORER_REQ, pf->state); + set_bit(ICE_CORER_REQ, pf->state); break; case 3: - set_bit(__ICE_GLOBR_REQ, pf->state); + set_bit(ICE_GLOBR_REQ, pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); - set_bit(__ICE_DOWN, pf->state); - set_bit(__ICE_NEEDS_RESTART, vsi->state); - set_bit(__ICE_SERVICE_DIS, pf->state); + set_bit(ICE_DOWN, pf->state); + set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); + set_bit(ICE_SERVICE_DIS, pf->state); break; } @@ -6659,32 +6800,28 @@ int ice_open_internal(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_port_info *pi; + enum ice_status status; int err; - if (test_bit(__ICE_NEEDS_RESTART, pf->state)) { + if (test_bit(ICE_NEEDS_RESTART, pf->state)) { netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); return -EIO; } - if (test_bit(__ICE_DOWN, pf->state)) { - netdev_err(netdev, "device is not ready yet\n"); - return -EBUSY; - } - netif_carrier_off(netdev); pi = vsi->port_info; - err = ice_update_link_info(pi); - if (err) { - netdev_err(netdev, "Failed to get link info, error %d\n", - err); - return err; + status = ice_update_link_info(pi); + if (status) { + netdev_err(netdev, "Failed to get link info, error %s\n", + ice_stat_str(status)); + return -EIO; } /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); - if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { + if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { err = ice_init_phy_user_cfg(pi); if (err) { netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", @@ -6701,12 +6838,7 @@ int ice_open_internal(struct net_device *netdev) } } else { set_bit(ICE_FLAG_NO_MEDIA, pf->flags); - err = ice_aq_set_link_restart_an(pi, false, NULL); - if (err) { - netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", - vsi->vsi_num, err); - return err; - } + ice_set_link(vsi, false); } err = ice_vsi_open(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 75ccbfc07f99..fee37a5844cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -644,6 +644,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(tmp); i++) + /* cppcheck-suppress objectIndex */ sum += ((u8 *)&tmp)[i]; if (sum) { diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 7f4c1ec1eff2..199aa5b71540 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -13,6 +13,9 @@ enum ice_prot_id { ICE_PROT_ID_INVAL = 0, ICE_PROT_MAC_OF_OR_S = 1, + ICE_PROT_MAC_IL = 4, + ICE_PROT_ETYPE_OL = 9, + ICE_PROT_ETYPE_IL = 10, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, ICE_PROT_IPV6_OF_OR_S = 40, @@ -21,7 +24,14 @@ enum ice_prot_id { ICE_PROT_UDP_OF = 52, ICE_PROT_UDP_IL_OR_S = 53, ICE_PROT_GRE_OF = 64, + ICE_PROT_ESP_F = 88, + ICE_PROT_ESP_2 = 89, ICE_PROT_SCTP_IL = 96, + ICE_PROT_ICMP_IL = 98, + ICE_PROT_ICMPV6_IL = 100, + ICE_PROT_PPPOE = 103, + ICE_PROT_L2TPV3 = 104, + ICE_PROT_ARP_OF = 118, ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; diff --git 
a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2403cb38b93c..2f097637e405 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -919,7 +919,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, } /** - * ice_sched_add_nodes_to_layer - Add nodes to a given layer + * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer * @pi: port information structure * @tc_node: pointer to TC node * @parent: pointer to parent node @@ -928,82 +928,107 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * @first_node_teid: pointer to the first node TEID * @num_nodes_added: pointer to number of nodes added * - * This function add nodes to a given layer. + * Add nodes into specific HW layer. */ static enum ice_status -ice_sched_add_nodes_to_layer(struct ice_port_info *pi, - struct ice_sched_node *tc_node, - struct ice_sched_node *parent, u8 layer, - u16 num_nodes, u32 *first_node_teid, - u16 *num_nodes_added) +ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) { - u32 *first_teid_ptr = first_node_teid; - u16 new_num_nodes, max_child_nodes; - enum ice_status status = 0; - struct ice_hw *hw = pi->hw; - u16 num_added = 0; - u32 temp; + u16 max_child_nodes; *num_nodes_added = 0; if (!num_nodes) - return status; + return 0; - if (!parent || layer < hw->sw_entry_point_layer) + if (!parent || layer < pi->hw->sw_entry_point_layer) return ICE_ERR_PARAM; /* max children per node per layer */ - max_child_nodes = hw->max_children[parent->tx_sched_layer]; + max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; - /* current number of children + required nodes exceed max children ? */ + /* current number of children + required nodes exceed max children */ if ((parent->num_children + num_nodes) > max_child_nodes) { /* Fail if the parent is a TC node */ if (parent == tc_node) return ICE_ERR_CFG; + return ICE_ERR_MAX_LIMIT; + } + + return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, + num_nodes_added, first_node_teid); +} + +/** + * ice_sched_add_nodes_to_layer - Add nodes to a given layer + * @pi: port information structure + * @tc_node: pointer to TC node + * @parent: pointer to parent node + * @layer: layer number to add nodes + * @num_nodes: number of nodes to be added + * @first_node_teid: pointer to the first node TEID + * @num_nodes_added: pointer to number of nodes added + * + * This function add nodes to a given layer. + */ +static enum ice_status +ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) +{ + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes = num_nodes; + enum ice_status status = 0; + *num_nodes_added = 0; + while (*num_nodes_added < num_nodes) { + u16 max_child_nodes, num_added = 0; + /* cppcheck-suppress unusedVariable */ + u32 temp; + + status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, + layer, new_num_nodes, + first_teid_ptr, + &num_added); + if (!status) + *num_nodes_added += num_added; + /* added more nodes than requested ? 
*/ + if (*num_nodes_added > num_nodes) { + ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, + *num_nodes_added); + status = ICE_ERR_CFG; + break; + } + /* break if all the nodes are added successfully */ + if (!status && (*num_nodes_added == num_nodes)) + break; + /* break if the error is not max limit */ + if (status && status != ICE_ERR_MAX_LIMIT) + break; + /* Exceeded the max children */ + max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; /* utilize all the spaces if the parent is not full */ if (parent->num_children < max_child_nodes) { new_num_nodes = max_child_nodes - parent->num_children; - /* this recursion is intentional, and wouldn't - * go more than 2 calls + } else { + /* This parent is full, try the next sibling */ + parent = parent->sibling; + /* Don't modify the first node TEID memory if the + * first node was added already in the above call. + * Instead send some temp memory for all other + * recursive calls. */ - status = ice_sched_add_nodes_to_layer(pi, tc_node, - parent, layer, - new_num_nodes, - first_node_teid, - &num_added); - if (status) - return status; + if (num_added) + first_teid_ptr = &temp; - *num_nodes_added += num_added; + new_num_nodes = num_nodes - *num_nodes_added; } - /* Don't modify the first node TEID memory if the first node was - * added already in the above call. Instead send some temp - * memory for all other recursive calls. - */ - if (num_added) - first_teid_ptr = &temp; - - new_num_nodes = num_nodes - num_added; - - /* This parent is full, try the next sibling */ - parent = parent->sibling; - - /* this recursion is intentional, for 1024 queues - * per VSI, it goes max of 16 iterations. - * 1024 / 8 = 128 layer 8 nodes - * 128 /8 = 16 (add 8 nodes per iteration) - */ - status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, - layer, new_num_nodes, - first_teid_ptr, - &num_added); - *num_nodes_added += num_added; - return status; } - - status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, - num_nodes_added, first_node_teid); return status; } @@ -1857,7 +1882,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, } /** - * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry + * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry * @pi: port information structure * @vsi_handle: software VSI handle * diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 554f567476f3..aa11d07793d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -2,7 +2,6 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice_common.h" -#include "ice_adminq_cmd.h" #include "ice_sriov.h" /** @@ -132,3 +131,402 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) return speed; } + +/* The mailbox overflow detection algorithm helps to check if there + * is a possibility of a malicious VF transmitting too many MBX messages to the + * PF. + * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during + * driver initialization in ice_init_hw() using ice_mbx_init_snapshot(). + * The struct ice_mbx_snapshot helps to track and traverse a static window of + * messages within the mailbox queue while looking for a malicious VF. + * + * 2. 
When the caller starts processing its mailbox queue in response to an + * interrupt, the structure ice_mbx_snapshot is expected to be cleared before + * the algorithm can be run for the first time for that interrupt. This can be + * done via ice_mbx_reset_snapshot(). + * + * 3. For every message read by the caller from the MBX Queue, the caller must + * call the detection algorithm's entry function ice_mbx_vf_state_handler(). + * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is + * filled as it is required to be passed to the algorithm. + * + * 4. Every time a message is read from the MBX queue, a VFId is received which + * is passed to the state handler. The boolean output is_malvf of the state + * handler ice_mbx_vf_state_handler() serves as an indicator to the caller + * whether this VF is malicious or not. + * + * 5. When a VF is identified to be malicious, the caller can send a message + * to the system administrator. The caller can invoke ice_mbx_report_malvf() + * to help determine if a malicious VF is to be reported or not. This function + * requires the caller to maintain a global bitmap to track all malicious VFs + * and pass that to ice_mbx_report_malvf() along with the VFID which was identified + * to be malicious by ice_mbx_vf_state_handler(). + * + * 6. The global bitmap maintained by PF can be cleared completely if PF is in + * reset or the bit corresponding to a VF can be cleared if that VF is in reset. + * When a VF is shut down and brought back up, we assume that the new VF + * brought up is not malicious and hence report it if found malicious. + * + * 7. The function ice_mbx_reset_snapshot() is called to reset the information + * in ice_mbx_snapshot for every new mailbox interrupt handled. + * + * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated + * when driver is unloaded. + */ +#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M) +/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that + * the max messages check must be ignored in the algorithm + */ +#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF + +/** + * ice_mbx_traverse - Pass through mailbox snapshot + * @hw: pointer to the HW struct + * @new_state: new algorithm state + * + * Traversing the mailbox static snapshot without checking + * for malicious VFs. + */ +static void +ice_mbx_traverse(struct ice_hw *hw, + enum ice_mbx_snapshot_state *new_state) +{ + struct ice_mbx_snap_buffer_data *snap_buf; + u32 num_iterations; + + snap_buf = &hw->mbx_snapshot.mbx_buf; + + /* As mailbox buffer is circular, applying a mask + * on the incremented iteration count. + */ + num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations); + + /* Checking either of the below conditions to exit snapshot traversal: + * Condition-1: If the number of iterations in the mailbox is equal to + * the mailbox head which would indicate that we have reached the end + * of the static snapshot. + * Condition-2: If the maximum messages serviced in the mailbox for a + * given interrupt is the highest possible value then there is no need + * to check if the number of messages processed is equal to it. If not + * check if the number of messages processed is greater than or equal + * to the maximum number of mailbox entries serviced in current work item. 
+ */ + if (num_iterations == snap_buf->head || + (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT && + ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx)) + *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; +} + +/** + * ice_mbx_detect_malvf - Detect malicious VF in snapshot + * @hw: pointer to the HW struct + * @vf_id: relative virtual function ID + * @new_state: new algorithm state + * @is_malvf: boolean output to indicate if VF is malicious + * + * This function tracks the number of asynchronous messages + * sent per VF and marks the VF as malicious if it exceeds + * the permissible number of messages to send. + */ +static enum ice_status +ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, + enum ice_mbx_snapshot_state *new_state, + bool *is_malvf) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + + if (vf_id >= snap->mbx_vf.vfcntr_len) + return ICE_ERR_OUT_OF_RANGE; + + /* increment the message count in the VF array */ + snap->mbx_vf.vf_cntr[vf_id]++; + + if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD) + *is_malvf = true; + + /* continue to iterate through the mailbox snapshot */ + ice_mbx_traverse(hw, new_state); + + return 0; +} + +/** + * ice_mbx_reset_snapshot - Reset mailbox snapshot structure + * @snap: pointer to mailbox snapshot structure in the ice_hw struct + * + * Reset the mailbox snapshot structure and clear VF counter array. + */ +static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap) +{ + u32 vfcntr_len; + + if (!snap || !snap->mbx_vf.vf_cntr) + return; + + /* Clear VF counters. */ + vfcntr_len = snap->mbx_vf.vfcntr_len; + if (vfcntr_len) + memset(snap->mbx_vf.vf_cntr, 0, + (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr))); + + /* Reset mailbox snapshot for a new capture. */ + memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); + snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; +} + +/** + * ice_mbx_vf_state_handler - Handle states of the overflow algorithm + * @hw: pointer to the HW struct + * @mbx_data: pointer to structure containing mailbox data + * @vf_id: relative virtual function (VF) ID + * @is_malvf: boolean output to indicate if VF is malicious + * + * The function serves as an entry point for the malicious VF + * detection algorithm by handling the different states and state + * transitions of the algorithm: + * New snapshot: This state is entered when creating a new static + * snapshot. The data from any previous mailbox snapshot is + * cleared and a new capture of the mailbox head and tail is + * logged. This will be the new static snapshot to detect + * asynchronous messages sent by VFs. On capturing the snapshot + * and depending on whether the number of pending messages in that + * snapshot exceed the watermark value, the state machine enters + * traverse or detect states. + * Traverse: If pending message count is below watermark then iterate + * through the snapshot without any action on VF. + * Detect: If pending message count exceeds watermark traverse + * the static snapshot and look for a malicious VF. + */ +enum ice_status +ice_mbx_vf_state_handler(struct ice_hw *hw, + struct ice_mbx_data *mbx_data, u16 vf_id, + bool *is_malvf) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + struct ice_mbx_snap_buffer_data *snap_buf; + struct ice_ctl_q_info *cq = &hw->mailboxq; + enum ice_mbx_snapshot_state new_state; + enum ice_status status = 0; + + if (!is_malvf || !mbx_data) + return ICE_ERR_BAD_PTR; + + /* When entering the mailbox state machine assume that the VF + * is not malicious until detected. 
+ */ + *is_malvf = false; + + /* Checking if max messages allowed to be processed while servicing current + * interrupt is not less than the defined AVF message threshold. + */ + if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) + return ICE_ERR_INVAL_SIZE; + + /* The watermark value should not be lesser than the threshold limit + * set for the number of asynchronous messages a VF can send to mailbox + * nor should it be greater than the maximum number of messages in the + * mailbox serviced in current interrupt. + */ + if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || + mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) + return ICE_ERR_PARAM; + + new_state = ICE_MAL_VF_DETECT_STATE_INVALID; + snap_buf = &snap->mbx_buf; + + switch (snap_buf->state) { + case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT: + /* Clear any previously held data in mailbox snapshot structure. */ + ice_mbx_reset_snapshot(snap); + + /* Collect the pending ARQ count, number of messages processed and + * the maximum number of messages allowed to be processed from the + * Mailbox for current interrupt. + */ + snap_buf->num_pending_arq = mbx_data->num_pending_arq; + snap_buf->num_msg_proc = mbx_data->num_msg_proc; + snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx; + + /* Capture a new static snapshot of the mailbox by logging the + * head and tail of snapshot and set num_iterations to the tail + * value to mark the start of the iteration through the snapshot. + */ + snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean + + mbx_data->num_pending_arq); + snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1); + snap_buf->num_iterations = snap_buf->tail; + + /* Pending ARQ messages returned by ice_clean_rq_elem + * is the difference between the head and tail of the + * mailbox queue. Comparing this value against the watermark + * helps to check if we potentially have malicious VFs. + */ + if (snap_buf->num_pending_arq >= + mbx_data->async_watermark_val) { + new_state = ICE_MAL_VF_DETECT_STATE_DETECT; + status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf); + } else { + new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE; + ice_mbx_traverse(hw, &new_state); + } + break; + + case ICE_MAL_VF_DETECT_STATE_TRAVERSE: + new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE; + ice_mbx_traverse(hw, &new_state); + break; + + case ICE_MAL_VF_DETECT_STATE_DETECT: + new_state = ICE_MAL_VF_DETECT_STATE_DETECT; + status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf); + break; + + default: + new_state = ICE_MAL_VF_DETECT_STATE_INVALID; + status = ICE_ERR_CFG; + } + + snap_buf->state = new_state; + + return status; +} + +/** + * ice_mbx_report_malvf - Track and note malicious VF + * @hw: pointer to the HW struct + * @all_malvfs: all malicious VFs tracked by PF + * @bitmap_len: length of bitmap in bits + * @vf_id: relative virtual function ID of the malicious VF + * @report_malvf: boolean to indicate if malicious VF must be reported + * + * This function will update a bitmap that keeps track of the malicious + * VFs attached to the PF. A malicious VF must be reported only once if + * discovered between VF resets or loading so the function checks + * the input vf_id against the bitmap to verify if the VF has been + * detected in any previous mailbox iterations. 
+ */ +enum ice_status +ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id, bool *report_malvf) +{ + if (!all_malvfs || !report_malvf) + return ICE_ERR_PARAM; + + *report_malvf = false; + + if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) + return ICE_ERR_INVAL_SIZE; + + if (vf_id >= bitmap_len) + return ICE_ERR_OUT_OF_RANGE; + + /* If the vf_id is found in the bitmap set bit and boolean to true */ + if (!test_and_set_bit(vf_id, all_malvfs)) + *report_malvf = true; + + return 0; +} + +/** + * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID + * @snap: pointer to the mailbox snapshot structure + * @all_malvfs: all malicious VFs tracked by PF + * @bitmap_len: length of bitmap in bits + * @vf_id: relative virtual function ID of the malicious VF + * + * In case of a VF reset, this function can be called to clear + * the bit corresponding to the VF ID in the bitmap tracking all + * malicious VFs attached to the PF. The function also clears the + * VF counter array at the index of the VF ID. This is to ensure + * that the new VF loaded is not considered malicious before going + * through the overflow detection algorithm. + */ +enum ice_status +ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id) +{ + if (!snap || !all_malvfs) + return ICE_ERR_PARAM; + + if (bitmap_len < snap->mbx_vf.vfcntr_len) + return ICE_ERR_INVAL_SIZE; + + /* Ensure VF ID value is not larger than bitmap or VF counter length */ + if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) + return ICE_ERR_OUT_OF_RANGE; + + /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ + clear_bit(vf_id, all_malvfs); + + /* Clear the VF counter in the mailbox snapshot structure for that VF ID. + * This is to ensure that if a VF is unloaded and a new one brought back + * up with the same VF ID for a snapshot currently in traversal or detect + * state the counter for that VF ID does not increment on top of existing + * values in the mailbox overflow detection algorithm. + */ + snap->mbx_vf.vf_cntr[vf_id] = 0; + + return 0; +} + +/** + * ice_mbx_init_snapshot - Initialize mailbox snapshot structure + * @hw: pointer to the hardware structure + * @vf_count: number of VFs allocated on a PF + * + * Clear the mailbox snapshot structure and allocate memory + * for the VF counter array based on the number of VFs allocated + * on that PF. + * + * Assumption: This function will assume ice_get_caps() has already been + * called to ensure that the vf_count can be compared against the number + * of VFs supported as defined in the functional capabilities of the device. + */ +enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Ensure that the number of VFs allocated is non-zero and + * is not greater than the number of supported VFs defined in + * the functional capabilities of the PF. + */ + if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) + return ICE_ERR_INVAL_SIZE; + + snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, + sizeof(*snap->mbx_vf.vf_cntr), + GFP_KERNEL); + if (!snap->mbx_vf.vf_cntr) + return ICE_ERR_NO_MEMORY; + + /* Setting the VF counter length to the number of allocated + * VFs for given PF's functional capabilities. + */ + snap->mbx_vf.vfcntr_len = vf_count; + + /* Clear mbx_buf in the mailbox snaphot structure and setting the + * mailbox snapshot state to a new capture. 
+ */ + memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); + snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + + return 0; +} + +/** + * ice_mbx_deinit_snapshot - Free mailbox snapshot structure + * @hw: pointer to the hardware structure + * + * Clear the mailbox snapshot structure and free the VF counter array. + */ +void ice_mbx_deinit_snapshot(struct ice_hw *hw) +{ + struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Free VF counter array and reset VF counter length */ + devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr); + snap->mbx_vf.vfcntr_len = 0; + + /* Clear mbx_buf in the mailbox snaphot structure */ + memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf)); +} diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 3d78a0795138..161dc55d9e9c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -4,7 +4,14 @@ #ifndef _ICE_SRIOV_H_ #define _ICE_SRIOV_H_ -#include "ice_common.h" +#include "ice_type.h" +#include "ice_controlq.h" + +/* Defining the mailbox message threshold as 63 asynchronous + * pending messages. Normal VF functionality does not require + * sending more than 63 asynchronous pending message. + */ +#define ICE_ASYNC_VF_MSG_THRESHOLD 63 #ifdef CONFIG_PCI_IOV enum ice_status @@ -12,6 +19,17 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +enum ice_status +ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, + u16 vf_id, bool *is_mal_vf); +enum ice_status +ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id); +enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); +void ice_mbx_deinit_snapshot(struct ice_hw *hw); +enum ice_status +ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, + u16 bitmap_len, u16 vf_id, bool *report_malvf); #else /* CONFIG_PCI_IOV */ static inline enum ice_status ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 834cbd3f7b31..357d3073d814 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -920,7 +920,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, struct ice_vsi_list_map_info *v_map; int i; - v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL); + v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL); if (!v_map) return NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index b91dcfd12727..e2b4b29ea207 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -309,7 +309,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->q_index) && - !test_bit(__ICE_DOWN, vsi->state)) { + !test_bit(ICE_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->q_index); ++tx_ring->tx_stats.restart_q; @@ -554,8 +554,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, * @frames: XDP frames to be transmitted * @flags: transmit flags * - * Returns number of frames successfully sent. Frames that fail are - * free'ed via XDP return API. 
+ * Returns number of frames successfully sent. Failed frames + * will be free'ed by XDP core. * For error cases, a negative errno code is returned and no-frames * are transmitted (caller must handle freeing frames). */ @@ -567,9 +567,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, unsigned int queue_index = smp_processor_id(); struct ice_vsi *vsi = np->vsi; struct ice_ring *xdp_ring; - int drops = 0, i; + int nxmit = 0, i; - if (test_bit(__ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) @@ -584,16 +584,15 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int err; err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); - if (err != ICE_XDP_TX) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err != ICE_XDP_TX) + break; + nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) ice_xdp_ring_update_tail(xdp_ring); - return n - drops; + return nxmit; } /** @@ -1098,6 +1097,11 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) dma_rmb(); if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { + struct ice_vsi *ctrl_vsi = rx_ring->vsi; + + if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && + ctrl_vsi->vf_id != ICE_INVAL_VFID) + ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); ice_put_rx_buf(rx_ring, NULL, 0); cleaned_count++; continue; @@ -1219,216 +1223,50 @@ construct_skb: } /** - * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic - * @port_info: port_info structure containing the current link speed - * @avg_pkt_size: average size of Tx or Rx packets based on clean routine - * @itr: ITR value to update - * - * Calculate how big of an increment should be applied to the ITR value passed - * in based on wmem_default, SKB overhead, ethernet overhead, and the current - * link speed. - * - * The following is a calculation derived from: - * wmem_default / (size + overhead) = desired_pkts_per_int - * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate - * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * ice_net_dim - Update net DIM algorithm + * @q_vector: the vector associated with the interrupt * - * Assuming wmem_default is 212992 and overhead is 640 bytes per - * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the - * formula down to: + * Create a DIM sample and notify net_dim() so that it can possibly decide + * a new ITR value based on incoming packets, bytes, and interrupts. * - * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 - * ITR = -------------------------------------------- * -------------- - * rate pkt_size + 640 + * This function is a no-op if the ring is not configured to dynamic ITR. 
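+ *
+ * For context: when net_dim() selects a new profile index it schedules the
+ * ring container's dim.work; the ice_tx_dim_work()/ice_rx_dim_work()
+ * handlers then translate that index into the {itr, intrl} pairs from
+ * tx_profile[]/rx_profile[] and program them via ice_write_itr() and
+ * ice_write_intrl().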
*/ -static unsigned int -ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, - unsigned int avg_pkt_size, - unsigned int itr) +static void ice_net_dim(struct ice_q_vector *q_vector) { - switch (port_info->phy.link_info.link_speed) { - case ICE_AQ_LINK_SPEED_100GB: - itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_50GB: - itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_40GB: - itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_25GB: - itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_20GB: - itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_10GB: - default: - itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - } - - if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { - itr &= ICE_ITR_ADAPTIVE_LATENCY; - itr += ICE_ITR_ADAPTIVE_MAX_USECS; - } + struct ice_ring_container *tx = &q_vector->tx; + struct ice_ring_container *rx = &q_vector->rx; - return itr; -} + if (ITR_IS_DYNAMIC(tx)) { + struct dim_sample dim_sample = {}; + u64 packets = 0, bytes = 0; + struct ice_ring *ring; -/** - * ice_update_itr - update the adaptive ITR value based on statistics - * @q_vector: structure containing interrupt and ring information - * @rc: structure containing ring performance data - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - */ -static void -ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) -{ - unsigned long next_update = jiffies; - unsigned int packets, bytes, itr; - bool container_is_rx; + ice_for_each_ring(ring, q_vector->tx) { + packets += ring->stats.pkts; + bytes += ring->stats.bytes; + } - if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) - return; + dim_update_sample(q_vector->total_events, packets, bytes, + &dim_sample); - /* If itr_countdown is set it means we programmed an ITR within - * the last 4 interrupt cycles. This has a side effect of us - * potentially firing an early interrupt. In order to work around - * this we need to throw out any data received for a few - * interrupts following the update. - */ - if (q_vector->itr_countdown) { - itr = rc->target_itr; - goto clear_counts; + net_dim(&tx->dim, dim_sample); } - container_is_rx = (&q_vector->rx == rc); - /* For Rx we want to push the delay up and default to low latency. - * for Tx we want to pull the delay down and default to high latency. - */ - itr = container_is_rx ? - ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : - ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; - - /* If we didn't update within up to 1 - 2 jiffies we can assume - * that either packets are coming in so slow there hasn't been - * any work, or that there is so much work that NAPI is dealing - * with interrupt moderation and we don't need to do anything. 
- */ - if (time_after(next_update, rc->next_update)) - goto clear_counts; - - prefetch(q_vector->vsi->port_info); - - packets = rc->total_pkts; - bytes = rc->total_bytes; + if (ITR_IS_DYNAMIC(rx)) { + struct dim_sample dim_sample = {}; + u64 packets = 0, bytes = 0; + struct ice_ring *ring; - if (container_is_rx) { - /* If Rx there are 1 to 4 packets and bytes are less than - * 9000 assume insufficient data to use bulk rate limiting - * approach unless Tx is already in bulk rate limiting. We - * are likely latency driven. - */ - if (packets && packets < 4 && bytes < 9000 && - (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { - itr = ICE_ITR_ADAPTIVE_LATENCY; - goto adjust_by_size_and_speed; + ice_for_each_ring(ring, q_vector->rx) { + packets += ring->stats.pkts; + bytes += ring->stats.bytes; } - } else if (packets < 4) { - /* If we have Tx and Rx ITR maxed and Tx ITR is running in - * bulk mode and we are receiving 4 or fewer packets just - * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so - * that the Rx can relax. - */ - if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && - (q_vector->rx.target_itr & ICE_ITR_MASK) == - ICE_ITR_ADAPTIVE_MAX_USECS) - goto clear_counts; - } else if (packets > 32) { - /* If we have processed over 32 packets in a single interrupt - * for Tx assume we need to switch over to "bulk" mode. - */ - rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; - } - - /* We have no packets to actually measure against. This means - * either one of the other queues on this vector is active or - * we are a Tx queue doing TSO with too high of an interrupt rate. - * - * Between 4 and 56 we can assume that our current interrupt delay - * is only slightly too low. As such we should increase it by a small - * fixed amount. - */ - if (packets < 56) { - itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; - if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { - itr &= ICE_ITR_ADAPTIVE_LATENCY; - itr += ICE_ITR_ADAPTIVE_MAX_USECS; - } - goto clear_counts; - } - - if (packets <= 256) { - itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); - itr &= ICE_ITR_MASK; - /* Between 56 and 112 is our "goldilocks" zone where we are - * working out "just right". Just report that our current - * ITR is good for us. - */ - if (packets <= 112) - goto clear_counts; - - /* If packet count is 128 or greater we are likely looking - * at a slight overrun of the delay we want. Try halving - * our delay to see if that will cut the number of packets - * in half per interrupt. - */ - itr >>= 1; - itr &= ICE_ITR_MASK; - if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) - itr = ICE_ITR_ADAPTIVE_MIN_USECS; + dim_update_sample(q_vector->total_events, packets, bytes, + &dim_sample); - goto clear_counts; + net_dim(&rx->dim, dim_sample); } - - /* The paths below assume we are dealing with a bulk ITR since - * number of packets is greater than 256. We are just going to have - * to compute a value and try to bring the count under control, - * though for smaller packet sizes there isn't much we can do as - * NAPI polling will likely be kicking in sooner rather than later. 
- */ - itr = ICE_ITR_ADAPTIVE_BULK; - -adjust_by_size_and_speed: - - /* based on checks above packets cannot be 0 so division is safe */ - itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, - bytes / packets, itr); - -clear_counts: - /* write back value */ - rc->target_itr = itr; - - /* next update should occur within next jiffy */ - rc->next_update = next_update + 1; - - rc->total_bytes = 0; - rc->total_pkts = 0; } /** @@ -1452,72 +1290,46 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); } -/* The act of updating the ITR will cause it to immediately trigger. In order - * to prevent this from throwing off adaptive update statistics we defer the - * update so that it can only happen so often. So after either Tx or Rx are - * updated we make the adaptive scheme wait until either the ITR completely - * expires via the next_update expiration or we have been through at least - * 3 interrupts. - */ -#define ITR_COUNTDOWN_START 3 - /** - * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt - * @q_vector: q_vector for which ITR is being updated and interrupt enabled + * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt + * @q_vector: the vector associated with the interrupt to enable + * + * Update the net_dim() algorithm and re-enable the interrupt associated with + * this vector. + * + * If the VSI is down, the interrupt will not be re-enabled. */ static void ice_update_ena_itr(struct ice_q_vector *q_vector) { - struct ice_ring_container *tx = &q_vector->tx; - struct ice_ring_container *rx = &q_vector->rx; struct ice_vsi *vsi = q_vector->vsi; + bool wb_en = q_vector->wb_on_itr; u32 itr_val; - /* when exiting WB_ON_ITR just reset the countdown and let ITR - * resume it's normal "interrupts-enabled" path - */ - if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) - q_vector->itr_countdown = 0; - - /* This will do nothing if dynamic updates are not enabled */ - ice_update_itr(q_vector, tx); - ice_update_itr(q_vector, rx); + if (test_bit(ICE_DOWN, vsi->state)) + return; - /* This block of logic allows us to get away with only updating - * one ITR value with each interrupt. The idea is to perform a - * pseudo-lazy update with the following criteria. - * - * 1. Rx is given higher priority than Tx if both are in same state - * 2. If we must reduce an ITR that is given highest priority. - * 3. We then give priority to increasing ITR based on amount. + /* When exiting WB_ON_ITR, let ITR resume its normal + * interrupts-enabled path. 
*/ - if (rx->target_itr < rx->current_itr) { - /* Rx ITR needs to be reduced, this is highest priority */ - itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); - rx->current_itr = rx->target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else if ((tx->target_itr < tx->current_itr) || - ((rx->target_itr - rx->current_itr) < - (tx->target_itr - tx->current_itr))) { - /* Tx ITR needs to be reduced, this is second priority - * Tx ITR needs to be increased more than Rx, fourth priority - */ - itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); - tx->current_itr = tx->target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else if (rx->current_itr != rx->target_itr) { - /* Rx ITR needs to be increased, third priority */ - itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); - rx->current_itr = rx->target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else { - /* Still have to re-enable the interrupts */ - itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); - if (q_vector->itr_countdown) - q_vector->itr_countdown--; + if (wb_en) + q_vector->wb_on_itr = false; + + /* This will do nothing if dynamic updates are not enabled. */ + ice_net_dim(q_vector); + + /* net_dim() updates ITR out-of-band using a work item */ + itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + /* trigger an immediate software interrupt when exiting + * busy poll, to make sure to catch any pending cleanups + * that might have been missed due to interrupt state + * transition. + */ + if (wb_en) { + itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_SW_ITR_INDX_M | + GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; } - - if (!test_bit(__ICE_DOWN, vsi->state)) - wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } /** @@ -1539,7 +1351,7 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) struct ice_vsi *vsi = q_vector->vsi; /* already in wb_on_itr mode no need to change it */ - if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) + if (q_vector->wb_on_itr) return; /* use previously set ITR values for all of the ITR indices by @@ -1551,7 +1363,7 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | GLINT_DYN_CTL_WB_ON_ITR_M); - q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; + q_vector->wb_on_itr = true; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 5dab77504fa5..c5a92ac787d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -192,7 +192,11 @@ struct ice_rxq_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buf_failed; - u64 gro_dropped; /* GRO returned dropped */ +}; + +enum ice_ring_state_t { + ICE_TX_XPS_INIT_DONE, + ICE_TX_NBITS, }; /* this enum matches hardware bits and is meant to be used by DYN_CTLN @@ -219,23 +223,20 @@ enum ice_rx_dtype { #define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_ITR_8K 124 #define ICE_ITR_20K 50 -#define ICE_ITR_MAX 8160 -#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC) -#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC) -#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */ -#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC)) -#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC) +#define ICE_ITR_MAX 8160 /* 0x1FE0 */ +#define ICE_DFLT_TX_ITR ICE_ITR_20K +#define ICE_DFLT_RX_ITR ICE_ITR_20K +enum ice_dynamic_itr { + ITR_STATIC = 0, + ITR_DYNAMIC = 1 
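+	/* ITR_STATIC keeps the configured itr_setting; ITR_DYNAMIC lets
+	 * net_dim() adjust moderation at run time (see ITR_IS_DYNAMIC()).
+	 */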
+}; + +#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC) #define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */ #define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S) #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */ #define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK) -#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002 -#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA -#define ICE_ITR_ADAPTIVE_LATENCY 0x8000 -#define ICE_ITR_ADAPTIVE_BULK 0x0000 - #define ICE_DFLT_INTRL 0 #define ICE_MAX_INTRL 236 @@ -292,6 +293,7 @@ struct ice_ring { }; struct rcu_head rcu; /* to avoid race on free */ + DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */ struct bpf_prog *xdp_prog; struct xsk_buff_pool *xsk_pool; u16 rx_offset; @@ -334,23 +336,22 @@ static inline bool ice_ring_is_xdp(struct ice_ring *ring) struct ice_ring_container { /* head of linked-list of rings */ struct ice_ring *ring; - unsigned long next_update; /* jiffies value of next queue update */ - unsigned int total_bytes; /* total bytes processed this int */ - unsigned int total_pkts; /* total packets processed this int */ + struct dim dim; /* data for net_dim algorithm */ u16 itr_idx; /* index in the interrupt vector */ - u16 target_itr; /* value in usecs divided by the hw->itr_gran */ - u16 current_itr; /* value in usecs divided by the hw->itr_gran */ - /* high bit set means dynamic ITR, rest is used to store user - * readable ITR value in usecs and must be converted before programming - * to a register. + /* this matches the maximum number of ITR bits, but in usec + * values, so it is shifted left one bit (bit zero is ignored) */ - u16 itr_setting; + u16 itr_setting:13; + u16 itr_reserved:2; + u16 itr_mode:1; }; struct ice_coalesce_stored { u16 itr_tx; u16 itr_rx; u8 intrl; + u8 tx_valid; + u8 rx_valid; }; /* iterator for handling rings in ring container */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 02b12736ea80..207f6ee3a7f6 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -143,6 +143,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, case ICE_RX_PTYPE_INNER_PROT_UDP: case ICE_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; + break; default: break; } diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 266036b7a49a..4474dd6a7ba1 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -192,6 +192,24 @@ enum ice_fltr_ptype { ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_SCTP, ICE_FLTR_PTYPE_NONF_IPV4_OTHER, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, + ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3, + ICE_FLTR_PTYPE_NONF_IPV4_ESP, + ICE_FLTR_PTYPE_NONF_IPV6_ESP, + ICE_FLTR_PTYPE_NONF_IPV4_AH, + ICE_FLTR_PTYPE_NONF_IPV6_AH, + ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP, + ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP, + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE, + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION, + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE, + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION, + ICE_FLTR_PTYPE_NON_IP_L2, ICE_FLTR_PTYPE_FRAG_IPV4, ICE_FLTR_PTYPE_NONF_IPV6_UDP, ICE_FLTR_PTYPE_NONF_IPV6_TCP, @@ -533,10 +551,7 @@ struct ice_dcb_app_priority_table { 
#define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 #define ICE_TLV_STATUS_ERR 0x4 -#define ICE_APP_PROT_ID_FCOE 0x8906 -#define ICE_APP_PROT_ID_ISCSI 0x0cbc #define ICE_APP_PROT_ID_ISCSI_860 0x035c -#define ICE_APP_PROT_ID_FIP 0x8914 #define ICE_APP_SEL_ETHTYPE 0x1 #define ICE_APP_SEL_TCPIP 0x2 #define ICE_CEE_APP_SEL_ETHTYPE 0x0 @@ -615,6 +630,80 @@ struct ice_fw_log_cfg { struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; }; +/* Enum defining the different states of the mailbox snapshot in the + * PF-VF mailbox overflow detection algorithm. The snapshot can be in + * states: + * 1. ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot + * within the mailbox buffer. + * 2. ICE_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snaphot + * 3. ICE_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the + * mailbox and mark any VFs sending more messages than the threshold limit set. + * 4. ICE_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to 0xFFFFFFFF. + */ +enum ice_mbx_snapshot_state { + ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0, + ICE_MAL_VF_DETECT_STATE_TRAVERSE, + ICE_MAL_VF_DETECT_STATE_DETECT, + ICE_MAL_VF_DETECT_STATE_INVALID = 0xFFFFFFFF, +}; + +/* Structure to hold information of the static snapshot and the mailbox + * buffer data used to generate and track the snapshot. + * 1. state: the state of the mailbox snapshot in the malicious VF + * detection state handler ice_mbx_vf_state_handler() + * 2. head: head of the mailbox snapshot in a circular mailbox buffer + * 3. tail: tail of the mailbox snapshot in a circular mailbox buffer + * 4. num_iterations: number of messages traversed in circular mailbox buffer + * 5. num_msg_proc: number of messages processed in mailbox + * 6. num_pending_arq: number of pending asynchronous messages + * 7. max_num_msgs_mbx: maximum messages in mailbox for currently + * serviced work item or interrupt. + */ +struct ice_mbx_snap_buffer_data { + enum ice_mbx_snapshot_state state; + u32 head; + u32 tail; + u32 num_iterations; + u16 num_msg_proc; + u16 num_pending_arq; + u16 max_num_msgs_mbx; +}; + +/* Structure to track messages sent by VFs on mailbox: + * 1. vf_cntr: a counter array of VFs to track the number of + * asynchronous messages sent by each VF + * 2. vfcntr_len: number of entries in VF counter array + */ +struct ice_mbx_vf_counter { + u32 *vf_cntr; + u32 vfcntr_len; +}; + +/* Structure to hold data relevant to the captured static snapshot + * of the PF-VF mailbox. + */ +struct ice_mbx_snapshot { + struct ice_mbx_snap_buffer_data mbx_buf; + struct ice_mbx_vf_counter mbx_vf; +}; + +/* Structure to hold data to be used for capturing or updating a + * static snapshot. + * 1. num_msg_proc: number of messages processed in mailbox + * 2. num_pending_arq: number of pending asynchronous messages + * 3. max_num_msgs_mbx: maximum messages in mailbox for currently + * serviced work item or interrupt. + * 4. async_watermark_val: An upper threshold set by caller to determine + * if the pending arq count is large enough to assume that there is + * the possibility of a mailicious VF. 
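+ *
+ * ice_mbx_vf_state_handler() expects ICE_ASYNC_VF_MSG_THRESHOLD <=
+ * async_watermark_val <= max_num_msgs_mbx, with max_num_msgs_mbx strictly
+ * above the threshold; out-of-range values are rejected with
+ * ICE_ERR_INVAL_SIZE or ICE_ERR_PARAM.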
+ */ +struct ice_mbx_data { + u16 num_msg_proc; + u16 num_pending_arq; + u16 max_num_msgs_mbx; + u16 async_watermark_val; +}; + /* Port hardware description */ struct ice_hw { u8 __iomem *hw_addr; @@ -703,13 +792,13 @@ struct ice_hw { enum ice_aq_err pkg_dwnld_status; - /* Driver's package ver - (from the Metadata seg) */ + /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; - /* Driver's Ice package version (from the Ice seg) */ - struct ice_pkg_ver ice_pkg_ver; - u8 ice_pkg_name[ICE_PKG_NAME_SIZE]; + /* Driver's Ice segment format version and ID (from the Ice seg) */ + struct ice_pkg_ver ice_seg_fmt_ver; + u8 ice_seg_id[ICE_SEG_ID_SIZE]; /* Pointer to the ice segment */ struct ice_seg *seg; @@ -746,6 +835,7 @@ struct ice_hw { DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX); struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; + struct ice_mbx_snapshot mbx_snapshot; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ @@ -810,6 +900,14 @@ struct ice_hw_port_stats { u64 fd_sb_match; }; +struct ice_aq_get_set_rss_lut_params { + u16 vsi_handle; /* software VSI handle */ + u16 lut_size; /* size of the LUT buffer */ + u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */ + u8 *lut; /* input RSS LUT for set and output RSS LUT for get */ + u8 global_lut_id; /* only valid when lut_type is global */ +}; + /* Checksum and Shadow RAM pointers */ #define ICE_SR_NVM_CTRL_WORD 0x00 #define ICE_SR_BOOT_CFG_PTR 0x132 @@ -916,4 +1014,9 @@ struct ice_hw_port_stats { #define ICE_FW_API_LLDP_FLTR_MIN 7 #define ICE_FW_API_LLDP_FLTR_PATCH 1 +/* AQ API version for report default configuration */ +#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1 +#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 +#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c new file mode 100644 index 000000000000..9feebe5f556c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021, Intel Corporation. */ + +#include "ice_virtchnl_allowlist.h" + +/* Purpose of this file is to share functionality to allowlist or denylist + * opcodes used in PF <-> VF communication. 
Group of opcodes: + * - default -> should be always allowed after creating VF, + * default_allowlist_opcodes + * - opcodes needed by VF to work correctly, but not associated with caps -> + * should be allowed after successful VF resources allocation, + * working_allowlist_opcodes + * - opcodes needed by VF when caps are activated + * + * Caps that don't use new opcodes (no opcodes should be allowed): + * - VIRTCHNL_VF_OFFLOAD_RSS_AQ + * - VIRTCHNL_VF_OFFLOAD_RSS_REG + * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR + * - VIRTCHNL_VF_OFFLOAD_CRC + * - VIRTCHNL_VF_OFFLOAD_RX_POLLING + * - VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 + * - VIRTCHNL_VF_OFFLOAD_ENCAP + * - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM + * - VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM + * - VIRTCHNL_VF_OFFLOAD_USO + */ + +/* default opcodes to communicate with VF */ +static const u32 default_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_VF_RESOURCES, VIRTCHNL_OP_VERSION, VIRTCHNL_OP_RESET_VF, +}; + +/* opcodes supported after successful VIRTCHNL_OP_GET_VF_RESOURCES */ +static const u32 working_allowlist_opcodes[] = { + VIRTCHNL_OP_CONFIG_TX_QUEUE, VIRTCHNL_OP_CONFIG_RX_QUEUE, + VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_OP_CONFIG_IRQ_MAP, + VIRTCHNL_OP_ENABLE_QUEUES, VIRTCHNL_OP_DISABLE_QUEUES, + VIRTCHNL_OP_GET_STATS, VIRTCHNL_OP_EVENT, +}; + +/* VIRTCHNL_VF_OFFLOAD_L2 */ +static const u32 l2_allowlist_opcodes[] = { + VIRTCHNL_OP_ADD_ETH_ADDR, VIRTCHNL_OP_DEL_ETH_ADDR, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, +}; + +/* VIRTCHNL_VF_OFFLOAD_REQ_QUEUES */ +static const u32 req_queues_allowlist_opcodes[] = { + VIRTCHNL_OP_REQUEST_QUEUES, +}; + +/* VIRTCHNL_VF_OFFLOAD_VLAN */ +static const u32 vlan_allowlist_opcodes[] = { + VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_OP_DEL_VLAN, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, +}; + +/* VIRTCHNL_VF_OFFLOAD_RSS_PF */ +static const u32 rss_pf_allowlist_opcodes[] = { + VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT, + VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA, +}; + +/* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */ +static const u32 adv_rss_pf_allowlist_opcodes[] = { + VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG, +}; + +/* VIRTCHNL_VF_OFFLOAD_FDIR_PF */ +static const u32 fdir_pf_allowlist_opcodes[] = { + VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER, +}; + +struct allowlist_opcode_info { + const u32 *opcodes; + size_t size; +}; + +#define BIT_INDEX(caps) (HWEIGHT((caps) - 1)) +#define ALLOW_ITEM(caps, list) \ + [BIT_INDEX(caps)] = { \ + .opcodes = list, \ + .size = ARRAY_SIZE(list) \ + } +static const struct allowlist_opcode_info allowlist_opcodes[] = { + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_L2, l2_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes), +}; + +/** + * ice_vc_is_opcode_allowed - check if this opcode is allowed on this VF + * @vf: pointer to VF structure + * @opcode: virtchnl opcode + * + * Return true if message is allowed on this VF + */ +bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode) +{ + if (opcode >= VIRTCHNL_OP_MAX) + return false; + + return test_bit(opcode, vf->opcodes_allowlist); +} + +/** + * ice_vc_allowlist_opcodes - allowlist selected opcodes + * @vf: pointer to VF structure + * @opcodes: array of opocodes to allowlist + * @size: size of 
opcodes array + * + * Function should be called to allowlist opcodes on VF. + */ +static void +ice_vc_allowlist_opcodes(struct ice_vf *vf, const u32 *opcodes, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + set_bit(opcodes[i], vf->opcodes_allowlist); +} + +/** + * ice_vc_clear_allowlist - clear all allowlist opcodes + * @vf: pointer to VF structure + */ +static void ice_vc_clear_allowlist(struct ice_vf *vf) +{ + bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX); +} + +/** + * ice_vc_set_default_allowlist - allowlist default opcodes for VF + * @vf: pointer to VF structure + */ +void ice_vc_set_default_allowlist(struct ice_vf *vf) +{ + ice_vc_clear_allowlist(vf); + ice_vc_allowlist_opcodes(vf, default_allowlist_opcodes, + ARRAY_SIZE(default_allowlist_opcodes)); +} + +/** + * ice_vc_set_working_allowlist - allowlist opcodes needed by VF to work + * @vf: pointer to VF structure + * + * allowlist opcodes that aren't associated with specific caps, but + * are needed by VF to work. + */ +void ice_vc_set_working_allowlist(struct ice_vf *vf) +{ + ice_vc_allowlist_opcodes(vf, working_allowlist_opcodes, + ARRAY_SIZE(working_allowlist_opcodes)); +} + +/** + * ice_vc_set_caps_allowlist - allowlist VF opcodes according to caps + * @vf: pointer to VF structure + */ +void ice_vc_set_caps_allowlist(struct ice_vf *vf) +{ + unsigned long caps = vf->driver_caps; + unsigned int i; + + for_each_set_bit(i, &caps, ARRAY_SIZE(allowlist_opcodes)) + ice_vc_allowlist_opcodes(vf, allowlist_opcodes[i].opcodes, + allowlist_opcodes[i].size); +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h new file mode 100644 index 000000000000..d3ae86ded219 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021, Intel Corporation. */ + +#ifndef _ICE_VIRTCHNL_ALLOWLIST_H_ +#define _ICE_VIRTCHNL_ALLOWLIST_H_ +#include "ice.h" + +bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode); + +void ice_vc_set_default_allowlist(struct ice_vf *vf); +void ice_vc_set_working_allowlist(struct ice_vf *vf); +void ice_vc_set_caps_allowlist(struct ice_vf *vf); +#endif /* _ICE_VIRTCHNL_ALLOWLIST_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c new file mode 100644 index 000000000000..eee180d8c024 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -0,0 +1,2204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021, Intel Corporation.
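[Editor's note] A standalone sketch of the allowlist mechanism above, for illustration only and not part of this patch: a per-VF bitmap of permitted opcodes is cleared on reset, seeded with a default group, and extended per capability bit; BIT_INDEX(caps) == HWEIGHT(caps - 1) turns a single-bit capability flag into the index of its opcode group. All names and numeric values below are invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define OP_MAX         64            /* stand-in for VIRTCHNL_OP_MAX   */
#define CAP_REQ_QUEUES 0x00000040UL  /* stand-in single-bit capability */

static unsigned long allowed[(OP_MAX + 63) / 64]; /* per-VF allowlist bitmap */

static void allow_opcode(unsigned int op)
{
	allowed[op / 64] |= 1UL << (op % 64);
}

static bool opcode_allowed(unsigned int op)
{
	return op < OP_MAX && (allowed[op / 64] & (1UL << (op % 64)));
}

/* popcount(cap - 1) is the bit position of a single-bit capability flag */
static unsigned int cap_index(unsigned long cap)
{
	return __builtin_popcountl(cap - 1);
}

int main(void)
{
	allow_opcode(3); /* pretend opcode 3 belongs to an enabled group */

	printf("index of CAP_REQ_QUEUES: %u\n", cap_index(CAP_REQ_QUEUES)); /* 6 */
	printf("opcode 3 allowed: %d\n", opcode_allowed(3));                /* 1 */
	printf("opcode 5 allowed: %d\n", opcode_allowed(5));                /* 0 */
	return 0;
}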
*/ + +#include "ice.h" +#include "ice_base.h" +#include "ice_lib.h" +#include "ice_flow.h" + +#define to_fltr_conf_from_desc(p) \ + container_of(p, struct virtchnl_fdir_fltr_conf, input) + +#define ICE_FLOW_PROF_TYPE_S 0 +#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S) +#define ICE_FLOW_PROF_VSI_S 32 +#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S) + +/* Flow profile ID format: + * [0:31] - flow type, flow + tun_offs + * [32:63] - VSI index + */ +#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \ + ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \ + (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M))) + +#define GTPU_TEID_OFFSET 4 +#define GTPU_EH_QFI_OFFSET 1 +#define GTPU_EH_QFI_MASK 0x3F +#define PFCP_S_OFFSET 0 +#define PFCP_S_MASK 0x1 +#define PFCP_PORT_NR 8805 + +#define FDIR_INSET_FLAG_ESP_S 0 +#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S) +#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S) +#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S) + +enum ice_fdir_tunnel_type { + ICE_FDIR_TUNNEL_TYPE_NONE = 0, + ICE_FDIR_TUNNEL_TYPE_GTPU, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH, +}; + +struct virtchnl_fdir_fltr_conf { + struct ice_fdir_fltr input; + enum ice_fdir_tunnel_type ttype; + u64 inset_flag; + u32 flow_id; +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ether[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_TCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_SCTP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_TCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_SCTP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_GTPU_IP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_GTPU_IP, + VIRTCHNL_PROTO_HDR_GTPU_EH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_L2TPV3, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + 
VIRTCHNL_PROTO_HDR_L2TPV3, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_AH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_AH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_PFCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_PFCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +struct virtchnl_fdir_pattern_match_item { + enum virtchnl_proto_hdr_type *list; + u64 input_set; + u64 *meta; +}; + +static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = { + {vc_pattern_ipv4, 0, NULL}, + {vc_pattern_ipv4_tcp, 0, NULL}, + {vc_pattern_ipv4_udp, 0, NULL}, + {vc_pattern_ipv4_sctp, 0, NULL}, + {vc_pattern_ipv6, 0, NULL}, + {vc_pattern_ipv6_tcp, 0, NULL}, + {vc_pattern_ipv6_udp, 0, NULL}, + {vc_pattern_ipv6_sctp, 0, NULL}, +}; + +static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = { + {vc_pattern_ipv4, 0, NULL}, + {vc_pattern_ipv4_tcp, 0, NULL}, + {vc_pattern_ipv4_udp, 0, NULL}, + {vc_pattern_ipv4_sctp, 0, NULL}, + {vc_pattern_ipv6, 0, NULL}, + {vc_pattern_ipv6_tcp, 0, NULL}, + {vc_pattern_ipv6_udp, 0, NULL}, + {vc_pattern_ipv6_sctp, 0, NULL}, + {vc_pattern_ether, 0, NULL}, + {vc_pattern_ipv4_gtpu, 0, NULL}, + {vc_pattern_ipv4_gtpu_eh, 0, NULL}, + {vc_pattern_ipv4_l2tpv3, 0, NULL}, + {vc_pattern_ipv6_l2tpv3, 0, NULL}, + {vc_pattern_ipv4_esp, 0, NULL}, + {vc_pattern_ipv6_esp, 0, NULL}, + {vc_pattern_ipv4_ah, 0, NULL}, + {vc_pattern_ipv6_ah, 0, NULL}, + {vc_pattern_ipv4_nat_t_esp, 0, NULL}, + {vc_pattern_ipv6_nat_t_esp, 0, NULL}, + {vc_pattern_ipv4_pfcp, 0, NULL}, + {vc_pattern_ipv6_pfcp, 0, NULL}, +}; + +struct virtchnl_fdir_inset_map { + enum virtchnl_proto_hdr_field field; + enum ice_flow_field fld; + u64 flag; + u64 mask; +}; + +static const struct virtchnl_fdir_inset_map fdir_inset_map[] = { + {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0}, + 
{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0}, + {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0}, + {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0}, + {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI, + FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M}, + {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M}, + {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0}, + {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0}, + {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0}, +}; + +/** + * ice_vc_fdir_param_check + * @vf: pointer to the VF structure + * @vsi_id: VF relative VSI ID + * + * Check for the valid VSI ID, PF's state and VF's state + * + * Return: 0 on success, and -EINVAL on error. + */ +static int +ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) +{ + struct ice_pf *pf = vf->pf; + + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EINVAL; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + return -EINVAL; + + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)) + return -EINVAL; + + if (vsi_id != vf->lan_vsi_num) + return -EINVAL; + + if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) + return -EINVAL; + + if (!pf->vsi[vf->lan_vsi_idx]) + return -EINVAL; + + return 0; +} + +/** + * ice_vf_start_ctrl_vsi + * @vf: pointer to the VF structure + * + * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF + * + * Return: 0 on success, and other on error. + */ +static int ice_vf_start_ctrl_vsi(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *ctrl_vsi; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + return -EEXIST; + + ctrl_vsi = ice_vf_ctrl_vsi_setup(vf); + if (!ctrl_vsi) { + dev_dbg(dev, "Could not setup control VSI for VF %d\n", + vf->vf_id); + return -ENOMEM; + } + + err = ice_vsi_open_ctrl(ctrl_vsi); + if (err) { + dev_dbg(dev, "Could not open control VSI for VF %d\n", + vf->vf_id); + goto err_vsi_open; + } + + return 0; + +err_vsi_open: + ice_vsi_release(ctrl_vsi); + if (vf->ctrl_vsi_idx != ICE_NO_VSI) { + pf->vsi[vf->ctrl_vsi_idx] = NULL; + vf->ctrl_vsi_idx = ICE_NO_VSI; + } + return err; +} + +/** + * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type + * @vf: pointer to the VF structure + * @flow: filter flow type + * + * Return: 0 on success, and other on error. 
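[Editor's note] For illustration only, not part of this patch: a minimal sketch of the 64-bit flow-director profile ID layout produced by the ICE_FLOW_PROF_FD macro earlier in this file, where the low 32 bits carry the flow type plus tunnel offset and the high 32 bits carry the VSI index. The helper name and the sample values are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Pack a profile ID the same way ICE_FLOW_PROF_FD(vsi, flow, tun_offs) does:
 * bits [0:31]  = flow type + tunnel offset
 * bits [32:63] = VSI index
 */
static uint64_t fd_prof_id(uint32_t vsi, uint32_t flow, uint32_t tun_offs)
{
	return (uint64_t)(flow + tun_offs) | ((uint64_t)vsi << 32);
}

int main(void)
{
	uint64_t id = fd_prof_id(5, 12, 0); /* hypothetical VSI 5, flow type 12 */

	printf("prof_id = 0x%016llx\n", (unsigned long long)id);
	printf("flow    = %u\n", (unsigned)(id & 0xffffffffULL));
	printf("vsi     = %u\n", (unsigned)(id >> 32));
	return 0;
}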
+ */ +static int +ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + if (!fdir->fdir_prof) { + fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf), + ICE_FLTR_PTYPE_MAX, + sizeof(*fdir->fdir_prof), + GFP_KERNEL); + if (!fdir->fdir_prof) + return -ENOMEM; + } + + if (!fdir->fdir_prof[flow]) { + fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf), + sizeof(**fdir->fdir_prof), + GFP_KERNEL); + if (!fdir->fdir_prof[flow]) + return -ENOMEM; + } + + return 0; +} + +/** + * ice_vc_fdir_free_prof - free profile for this filter flow type + * @vf: pointer to the VF structure + * @flow: filter flow type + */ +static void +ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + if (!fdir->fdir_prof) + return; + + if (!fdir->fdir_prof[flow]) + return; + + devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]); + fdir->fdir_prof[flow] = NULL; +} + +/** + * ice_vc_fdir_free_prof_all - free all the profile for this VF + * @vf: pointer to the VF structure + */ +static void ice_vc_fdir_free_prof_all(struct ice_vf *vf) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + enum ice_fltr_ptype flow; + + if (!fdir->fdir_prof) + return; + + for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++) + ice_vc_fdir_free_prof(vf, flow); + + devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof); + fdir->fdir_prof = NULL; +} + +/** + * ice_vc_fdir_parse_flow_fld + * @proto_hdr: virtual channel protocol filter header + * @conf: FDIR configuration for each filter + * @fld: field type array + * @fld_cnt: field counter + * + * Parse the virtual channel filter header and store them into field type array + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr, + struct virtchnl_fdir_fltr_conf *conf, + enum ice_flow_field *fld, int *fld_cnt) +{ + struct virtchnl_proto_hdr hdr; + u32 i; + + memcpy(&hdr, proto_hdr, sizeof(hdr)); + + for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) && + VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++) + if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) { + if (fdir_inset_map[i].mask && + ((fdir_inset_map[i].mask & conf->inset_flag) != + fdir_inset_map[i].flag)) + continue; + + fld[*fld_cnt] = fdir_inset_map[i].fld; + *fld_cnt += 1; + if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX) + return -EINVAL; + VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr, + fdir_inset_map[i].field); + } + + return 0; +} + +/** + * ice_vc_fdir_set_flow_fld + * @vf: pointer to the VF structure + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * @seg: array of one or more packet segments that describe the flow + * + * Parse the virtual channel add msg buffer's field vector and store them into + * flow's packet segment field + * + * Return: 0 on success, and other on error. 
+ */ +static int +ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf, + struct ice_flow_seg_info *seg) +{ + struct virtchnl_fdir_rule *rule = &fltr->rule_cfg; + enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX]; + struct device *dev = ice_pf_to_dev(vf->pf); + struct virtchnl_proto_hdrs *proto; + int fld_cnt = 0; + int i; + + proto = &rule->proto_hdrs; + for (i = 0; i < proto->count; i++) { + struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; + int ret; + + ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt); + if (ret) + return ret; + } + + if (fld_cnt == 0) { + dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < fld_cnt; i++) + ice_flow_set_fld(seg, fld[i], + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + + return 0; +} + +/** + * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header + * @vf: pointer to the VF structure + * @conf: FDIR configuration for each filter + * @seg: array of one or more packet segments that describe the flow + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + struct ice_flow_seg_info *seg) +{ + enum ice_fltr_ptype flow = conf->input.flow_type; + enum ice_fdir_tunnel_type ttype = conf->ttype; + struct device *dev = ice_pf_to_dev(vf->pf); + + switch (flow) { + case ICE_FLTR_PTYPE_NON_IP_L2: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_AH: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF 
%d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_AH: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + default: + dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n", + flow, vf->vf_id); + return -EINVAL; + } + + return 0; +} + +/** + * ice_vc_fdir_rem_prof - remove profile for this filter flow type + * @vf: pointer to the VF structure + * @flow: filter flow type + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + */ +static void +ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + struct ice_fd_hw_prof *vf_prof; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vf_vsi; + struct device *dev; + struct ice_hw *hw; + u64 prof_id; + int i; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + if (!fdir->fdir_prof || !fdir->fdir_prof[flow]) + return; + + vf_prof = fdir->fdir_prof[flow]; + + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) { + dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id); + return; + } + + if (!fdir->prof_entry_cnt[flow][tun]) + return; + + prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, + flow, tun ? 
ICE_FLTR_PTYPE_MAX : 0); + + for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++) + if (vf_prof->entry_h[i][tun]) { + u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]); + + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_flow_rem_entry(hw, ICE_BLK_FD, + vf_prof->entry_h[i][tun]); + vf_prof->entry_h[i][tun] = 0; + } + + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + devm_kfree(dev, vf_prof->fdir_seg[tun]); + vf_prof->fdir_seg[tun] = NULL; + + for (i = 0; i < vf_prof->cnt; i++) + vf_prof->vsi_h[i] = 0; + + fdir->prof_entry_cnt[flow][tun] = 0; +} + +/** + * ice_vc_fdir_rem_prof_all - remove profile for this VF + * @vf: pointer to the VF structure + */ +static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf) +{ + enum ice_fltr_ptype flow; + + for (flow = ICE_FLTR_PTYPE_NONF_NONE; + flow < ICE_FLTR_PTYPE_MAX; flow++) { + ice_vc_fdir_rem_prof(vf, flow, 0); + ice_vc_fdir_rem_prof(vf, flow, 1); + } +} + +/** + * ice_vc_fdir_write_flow_prof + * @vf: pointer to the VF structure + * @flow: filter flow type + * @seg: array of one or more packet segments that describe the flow + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + * + * Write the flow's profile config and packet segment into the hardware + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, + struct ice_flow_seg_info *seg, int tun) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + struct ice_vsi *vf_vsi, *ctrl_vsi; + struct ice_flow_seg_info *old_seg; + struct ice_flow_prof *prof = NULL; + struct ice_fd_hw_prof *vf_prof; + enum ice_status status; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + u64 entry1_h = 0; + u64 entry2_h = 0; + u64 prof_id; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) + return -EINVAL; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) + return -EINVAL; + + vf_prof = fdir->fdir_prof[flow]; + old_seg = vf_prof->fdir_seg[tun]; + if (old_seg) { + if (!memcmp(old_seg, seg, sizeof(*seg))) { + dev_dbg(dev, "Duplicated profile for VF %d!\n", + vf->vf_id); + return -EEXIST; + } + + if (fdir->fdir_fltr_cnt[flow][tun]) { + ret = -EINVAL; + dev_dbg(dev, "Input set conflicts for VF %d\n", + vf->vf_id); + goto err_exit; + } + + /* remove previously allocated profile */ + ice_vc_fdir_rem_prof(vf, flow, tun); + } + + prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, + tun ? 
ICE_FLTR_PTYPE_MAX : 0); + + status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + tun + 1, &prof); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", + flow, vf->vf_id); + goto err_exit; + } + + status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", + flow, vf->vf_id); + goto err_prof; + } + + status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, + "Could not add flow 0x%x Ctrl VSI entry for VF %d\n", + flow, vf->vf_id); + goto err_entry_1; + } + + vf_prof->fdir_seg[tun] = seg; + vf_prof->cnt = 0; + fdir->prof_entry_cnt[flow][tun] = 0; + + vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h; + vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx; + vf_prof->cnt++; + fdir->prof_entry_cnt[flow][tun]++; + + vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h; + vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx; + vf_prof->cnt++; + fdir->prof_entry_cnt[flow][tun]++; + + return 0; + +err_entry_1: + ice_rem_prof_id_flow(hw, ICE_BLK_FD, + ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id); + ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); +err_prof: + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); +err_exit: + return ret; +} + +/** + * ice_vc_fdir_config_input_set + * @vf: pointer to the VF structure + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + * + * Config the input set type and value for virtual channel add msg buffer + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf, int tun) +{ + struct ice_fdir_fltr *input = &conf->input; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_flow_seg_info *seg; + enum ice_fltr_ptype flow; + int ret; + + flow = input->flow_type; + ret = ice_vc_fdir_alloc_prof(vf, flow); + if (ret) { + dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id); + return ret; + } + + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); + if (!seg) + return -ENOMEM; + + ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg); + if (ret) { + dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg); + if (ret) { + dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun); + if (ret == -EEXIST) { + devm_kfree(dev, seg); + } else if (ret) { + dev_dbg(dev, "Write flow profile for VF %d failed\n", + vf->vf_id); + goto err_exit; + } + + return 0; + +err_exit: + devm_kfree(dev, seg); + return ret; +} + +/** + * ice_vc_fdir_match_pattern + * @fltr: virtual channel add cmd buffer + * @type: virtual channel protocol filter header type + * + * Matching the header type by comparing fltr and type's value. + * + * Return: true on success, and false on error. 
+ */ +static bool +ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr, + enum virtchnl_proto_hdr_type *type) +{ + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; + int i = 0; + + while ((i < proto->count) && + (*type == proto->proto_hdr[i].type) && + (*type != VIRTCHNL_PROTO_HDR_NONE)) { + type++; + i++; + } + + return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE)); +} + +/** + * ice_vc_fdir_get_pattern - get the allowed pattern list + * @vf: pointer to the VF info + * @len: filter list length + * + * Return: pointer to allowed filter list + */ +static const struct virtchnl_fdir_pattern_match_item * +ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len) +{ + const struct virtchnl_fdir_pattern_match_item *item; + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + + hw = &pf->hw; + if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", + sizeof(hw->active_pkg_name))) { + item = vc_fdir_pattern_comms; + *len = ARRAY_SIZE(vc_fdir_pattern_comms); + } else { + item = vc_fdir_pattern_os; + *len = ARRAY_SIZE(vc_fdir_pattern_os); + } + + return item; +} + +/** + * ice_vc_fdir_search_pattern + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * + * Search for a matching pattern in the supported pattern list + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr) +{ + const struct virtchnl_fdir_pattern_match_item *pattern; + int len, i; + + pattern = ice_vc_fdir_get_pattern(vf, &len); + + for (i = 0; i < len; i++) + if (ice_vc_fdir_match_pattern(fltr, pattern[i].list)) + return 0; + + return -EINVAL; +} + +/** + * ice_vc_fdir_parse_pattern + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * + * Parse the virtual channel filter's pattern and store it into conf + * + * Return: 0 on success, and other on error.
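[Editor's note] For illustration only, not part of this patch: a self-contained sketch of the template walk performed by the match/search helpers above, where the filter's protocol-header list is compared in lockstep with a NONE-terminated template and accepted only when both end together. The enum values and sample data are invented for the example.

#include <stdbool.h>
#include <stdio.h>

enum hdr { HDR_NONE = 0, HDR_ETH, HDR_IPV4, HDR_UDP, HDR_TCP };

/* Walk the filter's header list against a NONE-terminated template */
static bool match_pattern(const enum hdr *fltr, int count, const enum hdr *tmpl)
{
	int i = 0;

	while (i < count && tmpl[i] == fltr[i] && tmpl[i] != HDR_NONE)
		i++;

	return i == count && tmpl[i] == HDR_NONE;
}

int main(void)
{
	const enum hdr fltr[]     = { HDR_ETH, HDR_IPV4, HDR_UDP };
	const enum hdr tmpl_udp[] = { HDR_ETH, HDR_IPV4, HDR_UDP, HDR_NONE };
	const enum hdr tmpl_tcp[] = { HDR_ETH, HDR_IPV4, HDR_TCP, HDR_NONE };

	printf("udp template matches: %d\n", match_pattern(fltr, 3, tmpl_udp)); /* 1 */
	printf("tcp template matches: %d\n", match_pattern(fltr, 3, tmpl_tcp)); /* 0 */
	return 0;
}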
+ */ +static int +ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; + enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE; + enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_fdir_fltr *input = &conf->input; + int i; + + if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) { + dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n", + proto->count, vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < proto->count; i++) { + struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; + struct ip_esp_hdr *esph; + struct ip_auth_hdr *ah; + struct sctphdr *sctph; + struct ipv6hdr *ip6h; + struct udphdr *udph; + struct tcphdr *tcph; + struct ethhdr *eth; + struct iphdr *iph; + u8 s_field; + u8 *rawh; + + switch (hdr->type) { + case VIRTCHNL_PROTO_HDR_ETH: + eth = (struct ethhdr *)hdr->buffer; + input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2; + + if (hdr->field_selector) + input->ext_data.ether_type = eth->h_proto; + break; + case VIRTCHNL_PROTO_HDR_IPV4: + iph = (struct iphdr *)hdr->buffer; + l3 = VIRTCHNL_PROTO_HDR_IPV4; + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + + if (hdr->field_selector) { + input->ip.v4.src_ip = iph->saddr; + input->ip.v4.dst_ip = iph->daddr; + input->ip.v4.tos = iph->tos; + input->ip.v4.proto = iph->protocol; + } + break; + case VIRTCHNL_PROTO_HDR_IPV6: + ip6h = (struct ipv6hdr *)hdr->buffer; + l3 = VIRTCHNL_PROTO_HDR_IPV6; + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + + if (hdr->field_selector) { + memcpy(input->ip.v6.src_ip, + ip6h->saddr.in6_u.u6_addr8, + sizeof(ip6h->saddr)); + memcpy(input->ip.v6.dst_ip, + ip6h->daddr.in6_u.u6_addr8, + sizeof(ip6h->daddr)); + input->ip.v6.tc = ((u8)(ip6h->priority) << 4) | + (ip6h->flow_lbl[0] >> 4); + input->ip.v6.proto = ip6h->nexthdr; + } + break; + case VIRTCHNL_PROTO_HDR_TCP: + tcph = (struct tcphdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { + input->ip.v4.src_port = tcph->source; + input->ip.v4.dst_port = tcph->dest; + } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { + input->ip.v6.src_port = tcph->source; + input->ip.v6.dst_port = tcph->dest; + } + } + break; + case VIRTCHNL_PROTO_HDR_UDP: + udph = (struct udphdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { + input->ip.v4.src_port = udph->source; + input->ip.v4.dst_port = udph->dest; + } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { + input->ip.v6.src_port = udph->source; + input->ip.v6.dst_port = udph->dest; + } + } + break; + case VIRTCHNL_PROTO_HDR_SCTP: + sctph = (struct sctphdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = + ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = + ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { + input->ip.v4.src_port = sctph->source; + input->ip.v4.dst_port = sctph->dest; + } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { + input->ip.v6.src_port = sctph->source; + input->ip.v6.dst_port = 
sctph->dest; + } + } + break; + case VIRTCHNL_PROTO_HDR_L2TPV3: + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3; + + if (hdr->field_selector) + input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer); + break; + case VIRTCHNL_PROTO_HDR_ESP: + esph = (struct ip_esp_hdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && + l4 == VIRTCHNL_PROTO_HDR_UDP) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && + l4 == VIRTCHNL_PROTO_HDR_UDP) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && + l4 == VIRTCHNL_PROTO_HDR_NONE) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && + l4 == VIRTCHNL_PROTO_HDR_NONE) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP; + + if (l4 == VIRTCHNL_PROTO_HDR_UDP) + conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP; + else + conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->ip.v4.sec_parm_idx = esph->spi; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->ip.v6.sec_parm_idx = esph->spi; + } + break; + case VIRTCHNL_PROTO_HDR_AH: + ah = (struct ip_auth_hdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->ip.v4.sec_parm_idx = ah->spi; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->ip.v6.sec_parm_idx = ah->spi; + } + break; + case VIRTCHNL_PROTO_HDR_PFCP: + rawh = (u8 *)hdr->buffer; + s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR); + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR); + } + break; + case VIRTCHNL_PROTO_HDR_GTPU_IP: + rawh = (u8 *)hdr->buffer; + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + + if (hdr->field_selector) + input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]); + conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU; + break; + case VIRTCHNL_PROTO_HDR_GTPU_EH: + rawh = (u8 *)hdr->buffer; + + if (hdr->field_selector) + input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK; + conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + break; + default: + dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n", + hdr->type, vf->vf_id); + return -EINVAL; + } + } + + return 0; +} + +/** + * ice_vc_fdir_parse_action + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * + * Parse the virtual channel filter's action and store them into conf + * + * Return: 0 on success, and other on error. 
+ */ +static int +ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_fdir_fltr *input = &conf->input; + u32 dest_num = 0; + u32 mark_num = 0; + int i; + + if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) { + dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n", + as->count, vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < as->count; i++) { + struct virtchnl_filter_action *action = &as->actions[i]; + + switch (action->type) { + case VIRTCHNL_ACTION_PASSTHRU: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; + break; + case VIRTCHNL_ACTION_DROP: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; + break; + case VIRTCHNL_ACTION_QUEUE: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + input->q_index = action->act_conf.queue.index; + break; + case VIRTCHNL_ACTION_Q_REGION: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP; + input->q_index = action->act_conf.queue.index; + input->q_region = action->act_conf.queue.region; + break; + case VIRTCHNL_ACTION_MARK: + mark_num++; + input->fltr_id = action->act_conf.mark_id; + input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE; + break; + default: + dev_dbg(dev, "Invalid action type:0x%x for VF %d\n", + action->type, vf->vf_id); + return -EINVAL; + } + } + + if (dest_num == 0 || dest_num >= 2) { + dev_dbg(dev, "Invalid destination action for VF %d\n", + vf->vf_id); + return -EINVAL; + } + + if (mark_num >= 2) { + dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id); + return -EINVAL; + } + + return 0; +} + +/** + * ice_vc_validate_fdir_fltr - validate the virtual channel filter + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + int ret; + + ret = ice_vc_fdir_search_pattern(vf, fltr); + if (ret) + return ret; + + ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); + if (ret) + return ret; + + return ice_vc_fdir_parse_action(vf, fltr, conf); +} + +/** + * ice_vc_fdir_comp_rules - compare if two filter rules have the same value + * @conf_a: FDIR configuration for filter a + * @conf_b: FDIR configuration for filter b + * + * Return: 0 on success, and other on error. 
+ */ +static bool +ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a, + struct virtchnl_fdir_fltr_conf *conf_b) +{ + struct ice_fdir_fltr *a = &conf_a->input; + struct ice_fdir_fltr *b = &conf_b->input; + + if (conf_a->ttype != conf_b->ttype) + return false; + if (a->flow_type != b->flow_type) + return false; + if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) + return false; + if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) + return false; + if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data))) + return false; + if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask))) + return false; + if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data))) + return false; + if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask))) + return false; + if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) + return false; + if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) + return false; + + return true; +} + +/** + * ice_vc_fdir_is_dup_fltr + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * + * Check if there is a duplicated rule with the same conf value + * + * Return: true if a duplicate rule exists, false otherwise. + */ +static bool +ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf) +{ + struct ice_fdir_fltr *desc; + bool ret; + + list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) { + struct virtchnl_fdir_fltr_conf *node = + to_fltr_conf_from_desc(desc); + + ret = ice_vc_fdir_comp_rules(node, conf); + if (ret) + return true; + } + + return false; +} + +/** + * ice_vc_fdir_insert_entry + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @id: pointer to ID value allocated by driver + * + * Insert FDIR conf entry into list and allocate ID for this filter + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_insert_entry(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, u32 *id) +{ + struct ice_fdir_fltr *input = &conf->input; + int i; + + /* alloc ID corresponding to conf */ + i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0, + ICE_FDIR_MAX_FLTRS, GFP_KERNEL); + if (i < 0) + return -EINVAL; + *id = i; + + list_add(&input->fltr_node, &vf->fdir.fdir_rule_list); + return 0; +} + +/** + * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @id: filter rule's ID + */ +static void +ice_vc_fdir_remove_entry(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, u32 id) +{ + struct ice_fdir_fltr *input = &conf->input; + + idr_remove(&vf->fdir.fdir_rule_idr, id); + list_del(&input->fltr_node); +} + +/** + * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value + * @vf: pointer to the VF info + * @id: filter rule's ID + * + * Return: NULL on error, and other on success.
+ */ +static struct virtchnl_fdir_fltr_conf * +ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id) +{ + return idr_find(&vf->fdir.fdir_rule_idr, id); +} + +/** + * ice_vc_fdir_flush_entry - remove all FDIR conf entry + * @vf: pointer to the VF info + */ +static void ice_vc_fdir_flush_entry(struct ice_vf *vf) +{ + struct virtchnl_fdir_fltr_conf *conf; + struct ice_fdir_fltr *desc, *temp; + + list_for_each_entry_safe(desc, temp, + &vf->fdir.fdir_rule_list, fltr_node) { + conf = to_fltr_conf_from_desc(desc); + list_del(&desc->fltr_node); + devm_kfree(ice_pf_to_dev(vf->pf), conf); + } +} + +/** + * ice_vc_fdir_write_fltr - write filter rule into hardware + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @add: true implies add rule, false implies del rules + * @is_tun: false implies non-tunnel type filter, true implies tunnel filter + * + * Return: 0 on success, and other on error. + */ +static int ice_vc_fdir_write_fltr(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + bool add, bool is_tun) +{ + struct ice_fdir_fltr *input = &conf->input; + struct ice_vsi *vsi, *ctrl_vsi; + struct ice_fltr_desc desc; + enum ice_status status; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + u8 *pkt; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id); + return -EINVAL; + } + + input->dest_vsi = vsi->idx; + input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id); + return -EINVAL; + } + + pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + ice_fdir_get_prgm_desc(hw, input, &desc, add); + status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", + vf->vf_id, input->flow_type); + goto err_free_pkt; + } + + ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); + if (ret) + goto err_free_pkt; + + return 0; + +err_free_pkt: + devm_kfree(dev, pkt); + return ret; +} + +/** + * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler + * @t: pointer to timer_list + */ +static void ice_vf_fdir_timer(struct timer_list *t) +{ + struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr); + struct ice_vf_fdir_ctx *ctx_done; + struct ice_vf_fdir *fdir; + unsigned long flags; + struct ice_vf *vf; + struct ice_pf *pf; + + fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq); + vf = container_of(fdir, struct ice_vf, fdir); + ctx_done = &fdir->ctx_done; + pf = vf->pf; + spin_lock_irqsave(&fdir->ctx_lock, flags); + if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + WARN_ON_ONCE(1); + return; + } + + ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID; + + ctx_done->flags |= ICE_VF_FDIR_CTX_VALID; + ctx_done->conf = ctx_irq->conf; + ctx_done->stat = ICE_FDIR_CTX_TIMEOUT; + ctx_done->v_opcode = ctx_irq->v_opcode; + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + + set_bit(ICE_FD_VF_FLUSH_CTX, pf->state); + ice_service_task_schedule(pf); +} + +/** + * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler + * @ctrl_vsi: pointer to a VF's CTRL VSI + * @rx_desc: pointer to FDIR Rx queue descriptor + */ +void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_pf 
*pf = ctrl_vsi->back; + struct ice_vf_fdir_ctx *ctx_done; + struct ice_vf_fdir_ctx *ctx_irq; + struct ice_vf_fdir *fdir; + unsigned long flags; + struct device *dev; + struct ice_vf *vf; + int ret; + + vf = &pf->vf[ctrl_vsi->vf_id]; + + fdir = &vf->fdir; + ctx_done = &fdir->ctx_done; + ctx_irq = &fdir->ctx_irq; + dev = ice_pf_to_dev(pf); + spin_lock_irqsave(&fdir->ctx_lock, flags); + if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + WARN_ON_ONCE(1); + return; + } + + ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID; + + ctx_done->flags |= ICE_VF_FDIR_CTX_VALID; + ctx_done->conf = ctx_irq->conf; + ctx_done->stat = ICE_FDIR_CTX_IRQ; + ctx_done->v_opcode = ctx_irq->v_opcode; + memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc)); + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + + ret = del_timer(&ctx_irq->rx_tmr); + if (!ret) + dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id); + + set_bit(ICE_FD_VF_FLUSH_CTX, pf->state); + ice_service_task_schedule(pf); +} + +/** + * ice_vf_fdir_dump_info - dump FDIR information for diagnosis + * @vf: pointer to the VF info + */ +static void ice_vf_fdir_dump_info(struct ice_vf *vf) +{ + struct ice_vsi *vf_vsi; + u32 fd_size, fd_cnt; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + u16 vsi_num; + + pf = vf->pf; + hw = &pf->hw; + dev = ice_pf_to_dev(pf); + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); + + fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); + fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); + dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x", + vf->vf_id, + (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, + (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, + (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, + (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S); +} + +/** + * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor + * @vf: pointer to the VF info + * @ctx: FDIR context info for post processing + * @status: virtchnl FDIR program status + * + * Return: 0 on success, and other on error. 
+ */ +static int +ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, + enum virtchnl_fdir_prgm_status *status) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + u32 stat_err, error, prog_id; + int ret; + + stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0); + if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >> + ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) { + *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id); + ret = -EINVAL; + goto err_exit; + } + + prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >> + ICE_FXD_FLTR_WB_QW1_PROG_ID_S; + if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD && + ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) { + dev_err(dev, "VF %d: Desc show add, but ctx not", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; + ret = -EINVAL; + goto err_exit; + } + + if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL && + ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) { + dev_err(dev, "VF %d: Desc show del, but ctx not", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; + ret = -EINVAL; + goto err_exit; + } + + error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >> + ICE_FXD_FLTR_WB_QW1_FAIL_S; + if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) { + if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) { + dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + } else { + dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST; + } + ret = -EINVAL; + goto err_exit; + } + + error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >> + ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S; + if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) { + dev_err(dev, "VF %d: Profile matching error", vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + ret = -EINVAL; + goto err_exit; + } + + *status = VIRTCHNL_FDIR_SUCCESS; + + return 0; + +err_exit: + ice_vf_fdir_dump_info(vf); + return ret; +} + +/** + * ice_vc_add_fdir_fltr_post + * @vf: pointer to the VF structure + * @ctx: FDIR context info for post processing + * @status: virtchnl FDIR program status + * @success: true implies success, false implies failure + * + * Post process for flow director add command. If success, then do post process + * and send back success msg by virtchnl. Otherwise, do context reversion and + * send back failure msg by virtchnl. + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, + enum virtchnl_fdir_prgm_status status, + bool success) +{ + struct virtchnl_fdir_fltr_conf *conf = ctx->conf; + struct device *dev = ice_pf_to_dev(vf->pf); + enum virtchnl_status_code v_ret; + struct virtchnl_fdir_add *resp; + int ret, len, is_tun; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + len = sizeof(*resp); + resp = kzalloc(len, GFP_KERNEL); + if (!resp) { + len = 0; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id); + goto err_exit; + } + + if (!success) + goto err_exit; + + is_tun = 0; + resp->status = status; + resp->flow_id = conf->flow_id; + vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++; + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + + dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n", + vf->vf_id, conf->flow_id, + (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ? 
+ "add" : "del"); + return ret; + +err_exit: + if (resp) + resp->status = status; + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); + devm_kfree(dev, conf); + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + return ret; +} + +/** + * ice_vc_del_fdir_fltr_post + * @vf: pointer to the VF structure + * @ctx: FDIR context info for post processing + * @status: virtchnl FDIR program status + * @success: true implies success, false implies failure + * + * Post process for flow director del command. If success, then do post process + * and send back success msg by virtchnl. Otherwise, do context reversion and + * send back failure msg by virtchnl. + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, + enum virtchnl_fdir_prgm_status status, + bool success) +{ + struct virtchnl_fdir_fltr_conf *conf = ctx->conf; + struct device *dev = ice_pf_to_dev(vf->pf); + enum virtchnl_status_code v_ret; + struct virtchnl_fdir_del *resp; + int ret, len, is_tun; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + len = sizeof(*resp); + resp = kzalloc(len, GFP_KERNEL); + if (!resp) { + len = 0; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id); + goto err_exit; + } + + if (!success) + goto err_exit; + + is_tun = 0; + resp->status = status; + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); + vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--; + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + + dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n", + vf->vf_id, conf->flow_id, + (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ? + "add" : "del"); + devm_kfree(dev, conf); + return ret; + +err_exit: + if (resp) + resp->status = status; + if (success) + devm_kfree(dev, conf); + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + return ret; +} + +/** + * ice_flush_fdir_ctx + * @pf: pointer to the PF structure + * + * Flush all the pending event on ctx_done list and process them. 
+ */ +void ice_flush_fdir_ctx(struct ice_pf *pf) +{ + int i; + + if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state)) + return; + + ice_for_each_vf(pf, i) { + struct device *dev = ice_pf_to_dev(pf); + enum virtchnl_fdir_prgm_status status; + struct ice_vf *vf = &pf->vf[i]; + struct ice_vf_fdir_ctx *ctx; + unsigned long flags; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + continue; + + if (vf->ctrl_vsi_idx == ICE_NO_VSI) + continue; + + ctx = &vf->fdir.ctx_done; + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + continue; + } + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + + WARN_ON(ctx->stat == ICE_FDIR_CTX_READY); + if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) { + status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT; + dev_err(dev, "VF %d: ctrl_vsi irq timeout\n", + vf->vf_id); + goto err_exit; + } + + ret = ice_vf_verify_rx_desc(vf, ctx, &status); + if (ret) + goto err_exit; + + if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) + ice_vc_add_fdir_fltr_post(vf, ctx, status, true); + else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER) + ice_vc_del_fdir_fltr_post(vf, ctx, status, true); + else + dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id); + + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + continue; +err_exit: + if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) + ice_vc_add_fdir_fltr_post(vf, ctx, status, false); + else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER) + ice_vc_del_fdir_fltr_post(vf, ctx, status, false); + else + dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id); + + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + } +} + +/** + * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler + * @vf: pointer to the VF structure + * @conf: FDIR configuration for each filter + * @v_opcode: virtual channel operation code + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, + enum virtchnl_ops v_opcode) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vf_fdir_ctx *ctx; + unsigned long flags; + + ctx = &vf->fdir.ctx_irq; + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) || + (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + dev_dbg(dev, "VF %d: Last request is still in progress\n", + vf->vf_id); + return -EBUSY; + } + ctx->flags |= ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + + ctx->conf = conf; + ctx->v_opcode = v_opcode; + ctx->stat = ICE_FDIR_CTX_READY; + timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0); + + mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies)); + + return 0; +} + +/** + * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler + * @vf: pointer to the VF structure + * + * Return: 0 on success, and other on error. 
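[Editor's note] For illustration only, not part of this patch: a standalone model of the ctx_irq/ctx_done handoff used by the timer and interrupt paths above. A request marks the in-flight context valid and arms a timeout; whichever of the Rx interrupt or the timeout fires first claims the context, records a status in the done context, and leaves it for the flush routine. The names and states below are invented for the example.

#include <stdbool.h>
#include <stdio.h>

enum ctx_stat { CTX_READY, CTX_IRQ, CTX_TIMEOUT };

struct fdir_ctx {
	bool valid;
	enum ctx_stat stat;
};

static struct fdir_ctx ctx_irq_model, ctx_done_model;

/* Move the in-flight context to the done slot; only the first caller wins. */
static bool complete_ctx(enum ctx_stat how)
{
	if (!ctx_irq_model.valid)
		return false;            /* already claimed by the other path */

	ctx_irq_model.valid = false;
	ctx_done_model.valid = true;
	ctx_done_model.stat = how;
	return true;
}

int main(void)
{
	ctx_irq_model.valid = true;      /* request issued, timeout armed */
	ctx_irq_model.stat = CTX_READY;

	printf("irq claims ctx: %d\n", complete_ctx(CTX_IRQ));     /* 1 */
	printf("late timeout:   %d\n", complete_ctx(CTX_TIMEOUT)); /* 0 */
	printf("done status:    %d\n", ctx_done_model.stat);       /* 1 = CTX_IRQ */
	return 0;
}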
+ */ +static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf) +{ + struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq; + unsigned long flags; + + del_timer(&ctx->rx_tmr); + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); +} + +/** + * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Return: 0 on success, and other on error. + */ +int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg; + struct virtchnl_fdir_add *stat = NULL; + struct virtchnl_fdir_fltr_conf *conf; + enum virtchnl_status_code v_ret; + struct device *dev; + struct ice_pf *pf; + int is_tun = 0; + int len = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vf_start_ctrl_vsi(vf); + if (ret && (ret != -EEXIST)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n", + vf->vf_id, ret); + goto err_exit; + } + + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL); + if (!conf) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + len = sizeof(*stat); + ret = ice_vc_validate_fdir_fltr(vf, fltr, conf); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; + dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id); + goto err_free_conf; + } + + if (fltr->validate_only) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_SUCCESS; + devm_kfree(dev, conf); + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, + v_ret, (u8 *)stat, len); + goto exit; + } + + ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT; + dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n", + vf->vf_id, ret); + goto err_free_conf; + } + + ret = ice_vc_fdir_is_dup_fltr(vf, conf); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST; + dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n", + vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n", + vf->vf_id, ret); + goto err_rem_entry; + } + +exit: + kfree(stat); + return ret; + +err_rem_entry: + ice_vc_fdir_clear_irq_ctx(vf); + 
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); +err_free_conf: + devm_kfree(dev, conf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + +/** + * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Return: 0 on success, and other on error. + */ +int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg; + struct virtchnl_fdir_del *stat = NULL; + struct virtchnl_fdir_fltr_conf *conf; + enum virtchnl_status_code v_ret; + struct device *dev; + struct ice_pf *pf; + int is_tun = 0; + int len = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + len = sizeof(*stat); + + conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id); + if (!conf) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST; + dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n", + vf->vf_id, fltr->flow_id); + goto err_exit; + } + + /* Just return failure when ctrl_vsi idx is invalid */ + if (vf->ctrl_vsi_idx == ICE_NO_VSI) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n", + vf->vf_id, ret); + goto err_del_tmr; + } + + kfree(stat); + + return ret; + +err_del_tmr: + ice_vc_fdir_clear_irq_ctx(vf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + +/** + * ice_vf_fdir_init - init FDIR resource for VF + * @vf: pointer to the VF info + */ +void ice_vf_fdir_init(struct ice_vf *vf) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + idr_init(&fdir->fdir_rule_idr); + INIT_LIST_HEAD(&fdir->fdir_rule_list); + + spin_lock_init(&fdir->ctx_lock); + fdir->ctx_irq.flags = 0; + fdir->ctx_done.flags = 0; +} + +/** + * ice_vf_fdir_exit - destroy FDIR resource for VF + * @vf: pointer to the VF info + */ +void ice_vf_fdir_exit(struct ice_vf *vf) +{ + ice_vc_fdir_flush_entry(vf); + idr_destroy(&vf->fdir.fdir_rule_idr); + ice_vc_fdir_rem_prof_all(vf); + ice_vc_fdir_free_prof_all(vf); +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h new file mode 100644 index 000000000000..f4e629f4c09b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021, Intel Corporation. 
*/ + +#ifndef _ICE_VIRTCHNL_FDIR_H_ +#define _ICE_VIRTCHNL_FDIR_H_ + +struct ice_vf; +struct ice_pf; + +enum ice_fdir_ctx_stat { + ICE_FDIR_CTX_READY, + ICE_FDIR_CTX_IRQ, + ICE_FDIR_CTX_TIMEOUT, +}; + +struct ice_vf_fdir_ctx { + struct timer_list rx_tmr; + enum virtchnl_ops v_opcode; + enum ice_fdir_ctx_stat stat; + union ice_32b_rx_flex_desc rx_desc; +#define ICE_VF_FDIR_CTX_VALID BIT(0) + u32 flags; + + void *conf; +}; + +/* VF FDIR information structure */ +struct ice_vf_fdir { + u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + struct ice_fd_hw_prof **fdir_prof; + + struct idr fdir_rule_idr; + struct list_head fdir_rule_list; + + spinlock_t ctx_lock; /* protects FDIR context info */ + struct ice_vf_fdir_ctx ctx_irq; + struct ice_vf_fdir_ctx ctx_done; +}; + +#ifdef CONFIG_PCI_IOV +int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg); +int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg); +void ice_vf_fdir_init(struct ice_vf *vf); +void ice_vf_fdir_exit(struct ice_vf *vf); +void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, + union ice_32b_rx_flex_desc *rx_desc); +void ice_flush_fdir_ctx(struct ice_pf *pf); +#else +static inline void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { } +static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { } +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_VIRTCHNL_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 1f38a8d0c525..a1d22d2aa0bd 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -5,6 +5,256 @@ #include "ice_base.h" #include "ice_lib.h" #include "ice_fltr.h" +#include "ice_flow.h" +#include "ice_virtchnl_allowlist.h" + +#define FIELD_SELECTOR(proto_hdr_field) \ + BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK) + +struct ice_vc_hdr_match_type { + u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */ + u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */ +}; + +static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = { + {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, + {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER}, + {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER}, + {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP}, + {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP}, + {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP}, +}; + +static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = { + {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, + {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH}, + {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN}, + {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN}, + {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER}, + {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER}, + {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP}, + {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP}, + {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP}, + {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE}, + {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP}, + {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH}, + {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, + ICE_FLOW_SEG_HDR_GTPU_DWN}, + {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, + ICE_FLOW_SEG_HDR_GTPU_UP}, + {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3}, + {VIRTCHNL_PROTO_HDR_ESP, 
ICE_FLOW_SEG_HDR_ESP}, + {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH}, + {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION}, +}; + +struct ice_vc_hash_field_match_type { + u32 vc_hdr; /* virtchnl headers + * (VIRTCHNL_PROTO_HDR_XXX) + */ + u32 vc_hash_field; /* virtchnl hash fields selector + * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX)) + */ + u64 ice_hash_field; /* ice hash fields + * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX)) + */ +}; + +static const struct +ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = { + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + ICE_FLOW_HASH_IPV4}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), + ICE_FLOW_HASH_IPV6}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), + ICE_FLOW_HASH_TCP_PORT}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + 
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), + ICE_FLOW_HASH_UDP_PORT}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), + ICE_FLOW_HASH_SCTP_PORT}, +}; + +static const struct +ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { + {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)}, + {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)}, + {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), + ICE_FLOW_HASH_ETH}, + {VIRTCHNL_PROTO_HDR_ETH, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE), + BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)}, + {VIRTCHNL_PROTO_HDR_S_VLAN, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)}, + {VIRTCHNL_PROTO_HDR_C_VLAN, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + ICE_FLOW_HASH_IPV4}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), + ICE_FLOW_HASH_IPV6}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + 
{VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), + ICE_FLOW_HASH_TCP_PORT}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), + ICE_FLOW_HASH_UDP_PORT}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), + ICE_FLOW_HASH_SCTP_PORT}, + {VIRTCHNL_PROTO_HDR_PPPOE, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)}, + {VIRTCHNL_PROTO_HDR_GTPU_IP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID), + BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)}, + {VIRTCHNL_PROTO_HDR_L2TPV3, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)}, + {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI), + BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)}, + {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI), + BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)}, + {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), + BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)}, +}; + +/** + * ice_get_vf_vsi - get VF's VSI based on the stored index + * @vf: VF used to get VSI + */ +static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +{ + return vf->pf->vsi[vf->lan_vsi_idx]; +} /** * ice_validate_vf_id - helper to check if VF ID is valid @@ -197,11 +447,30 @@ static void ice_vf_invalidate_vsi(struct ice_vf *vf) */ static void ice_vf_vsi_release(struct ice_vf *vf) { - ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]); + ice_vsi_release(ice_get_vf_vsi(vf)); ice_vf_invalidate_vsi(vf); } /** + * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access + * @vf: VF that control VSI is being invalidated on + */ +static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) +{ + vf->ctrl_vsi_idx = ICE_NO_VSI; +} + +/** + * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it + * @vf: VF that control VSI is being released on + */ +static void ice_vf_ctrl_vsi_release(struct ice_vf *vf) +{ + ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); + ice_vf_ctrl_invalidate_vsi(vf); +} + +/** * ice_free_vf_res - Free a VF's resources * @vf: pointer to the VF info */ @@ -214,6 +483,10 @@ static void ice_free_vf_res(struct ice_vf *vf) * accessing the VF's VSI after it's freed or invalidated. 
*/ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + ice_vf_fdir_exit(vf); + /* free VF control VSI */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_vsi_release(vf); /* free VSI and disconnect it from the parent uplink */ if (vf->lan_vsi_idx != ICE_NO_VSI) { @@ -250,7 +523,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) struct ice_hw *hw; hw = &pf->hw; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); dev = ice_pf_to_dev(pf); wr32(hw, VPINT_ALLOC(vf->vf_id), 0); @@ -325,10 +598,7 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf) */ static void ice_dis_vf_qs(struct ice_vf *vf) { - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = pf->vsi[vf->lan_vsi_idx]; + struct ice_vsi *vsi = ice_get_vf_vsi(vf); ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); ice_vsi_stop_all_rx_rings(vsi); @@ -348,7 +618,7 @@ void ice_free_vfs(struct ice_pf *pf) if (!pf->vf) return; - while (test_and_set_bit(__ICE_VF_DIS, pf->state)) + while (test_and_set_bit(ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); /* Disable IOV before freeing resources. This lets any VF drivers @@ -401,7 +671,15 @@ void ice_free_vfs(struct ice_pf *pf) wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } - clear_bit(__ICE_VF_DIS, pf->state); + + /* clear malicious info if the VFs are getting released */ + for (i = 0; i < tmp; i++) + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, + ICE_MAX_VF_COUNT, i)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", + i); + + clear_bit(ICE_VF_DIS, pf->state); clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); } @@ -560,6 +838,28 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) } /** + * ice_vf_ctrl_vsi_setup - Set up a VF control VSI + * @vf: VF to setup control VSI for + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. 
+ */ +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); + ice_vf_ctrl_invalidate_vsi(vf); + } + + return vsi; +} + +/** * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space * @pf: pointer to PF structure * @vf: pointer to VF that the first MSIX vector index is being calculated for @@ -585,8 +885,8 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) */ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); u16 vlan_id = 0; int err; @@ -622,8 +922,8 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf) */ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); enum ice_status status; u8 broadcast[ETH_ALEN]; @@ -724,8 +1024,8 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf) */ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); struct ice_hw *hw = &vf->pf->hw; u32 reg; @@ -772,7 +1072,7 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) */ static void ice_ena_vf_mappings(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct ice_vsi *vsi = ice_get_vf_vsi(vf); ice_ena_vf_msix_mappings(vf); ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq); @@ -1035,7 +1335,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, static void ice_vf_clear_counters(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct ice_vsi *vsi = ice_get_vf_vsi(vf); vf->num_mac = 0; vsi->num_vlan = 0; @@ -1095,8 +1395,8 @@ static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) */ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); ice_vf_set_host_trust_cfg(vf); @@ -1136,10 +1436,8 @@ static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf) */ static int ice_vf_rebuild_vsi(struct ice_vf *vf) { + struct ice_vsi *vsi = ice_get_vf_vsi(vf); struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vsi_rebuild(vsi, true)) { dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", @@ -1212,8 +1510,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) if (!pf->num_alloc_vfs) return false; + /* clear all malicious info if the VFs are getting reset */ + ice_for_each_vf(pf, i) + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); + /* If VFs have been disabled, there is no need to reset */ - if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + if (test_and_set_bit(ICE_VF_DIS, pf->state)) return false; /* Begin reset on all VFs at once */ @@ -1256,13 +1559,23 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { 
vf = &pf->vf[v]; + vf->driver_caps = 0; + ice_vc_set_default_allowlist(vf); + + ice_vf_fdir_exit(vf); + /* clean VF control VSI when resetting VFs since it should be + * setup only when VF creates its first FDIR rule. + */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_invalidate_vsi(vf); + ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); } ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_VF_DIS, pf->state); return true; } @@ -1282,7 +1595,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf) * means something else is resetting the VF, so we shouldn't continue. * Otherwise, set disable VF state bit for actual reset, and continue. */ - return (test_bit(__ICE_VF_DIS, pf->state) || + return (test_bit(ICE_VF_DIS, pf->state) || test_bit(ICE_VF_STATE_DIS, vf->vf_states)); } @@ -1307,7 +1620,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev = ice_pf_to_dev(pf); - if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", vf->vf_id); return true; @@ -1323,7 +1636,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) set_bit(ICE_VF_STATE_DIS, vf->vf_states); ice_trigger_vf_reset(vf, is_vflr, false); - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) ice_dis_vf_qs(vf); @@ -1353,6 +1666,9 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) usleep_range(10, 20); } + vf->driver_caps = 0; + ice_vc_set_default_allowlist(vf); + /* Display a warning if VF didn't manage to reset in time, but need to * continue on with the operation. */ @@ -1369,15 +1685,26 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) else promisc_m = ICE_UCAST_PROMISC_BITS; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) dev_err(dev, "disabling promiscuous mode failed\n"); } + ice_vf_fdir_exit(vf); + /* clean VF control VSI when resetting VF since it should be setup + * only when VF creates its first FDIR rule. + */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_vsi_release(vf); + ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi_with_release(vf); ice_vf_post_vsi_rebuild(vf); + /* if the VF has been reset allow it to come up again */ + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); + return true; } @@ -1532,7 +1859,7 @@ teardown: } /** - * ice_set_dflt_settings - set VF defaults during initialization/creation + * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation * @pf: PF holding reference to all VFs for default configuration */ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) @@ -1549,6 +1876,13 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); vf->spoofchk = true; vf->num_vf_qs = pf->num_qps_per_vf; + ice_vc_set_default_allowlist(vf); + + /* ctrl_vsi_idx will be set to a valid value only when VF + * creates its first fdir rule. + */ + ice_vf_ctrl_invalidate_vsi(vf); + ice_vf_fdir_init(vf); } } @@ -1586,7 +1920,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - set_bit(__ICE_OICR_INTR_DIS, pf->state); + set_bit(ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_vfs); @@ -1614,7 +1948,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } - clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_VF_DIS, pf->state); return 0; err_unroll_sriov: @@ -1626,7 +1960,7 @@ err_pci_disable_sriov: err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); - clear_bit(__ICE_OICR_INTR_DIS, pf->state); + clear_bit(ICE_OICR_INTR_DIS, pf->state); return ret; } @@ -1704,6 +2038,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); struct device *dev = ice_pf_to_dev(pf); + enum ice_status status; int err; err = ice_check_sriov_allowed(pf); @@ -1712,6 +2047,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!num_vfs) { if (!pci_vfs_assigned(pdev)) { + ice_mbx_deinit_snapshot(&pf->hw); ice_free_vfs(pf); if (pf->lag) ice_enable_lag(pf->lag); @@ -1722,9 +2058,15 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) return -EBUSY; } + status = ice_mbx_init_snapshot(&pf->hw, num_vfs); + if (status) + return ice_status_to_errno(status); + err = ice_pci_sriov_ena(pf, num_vfs); - if (err) + if (err) { + ice_mbx_deinit_snapshot(&pf->hw); return err; + } if (pf->lag) ice_disable_lag(pf->lag); @@ -1744,7 +2086,7 @@ void ice_process_vflr_event(struct ice_pf *pf) unsigned int vf_id; u32 reg; - if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || !pf->num_alloc_vfs) return; @@ -1789,7 +2131,7 @@ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) struct ice_vsi *vsi; u16 rxq_idx; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); ice_for_each_rxq(vsi, rxq_idx) if (vsi->rxq_map[rxq_idx] == pfq) @@ -1848,7 +2190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) * * send msg to VF */ -static int +int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { @@ -1929,8 +2271,7 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) */ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; - struct ice_port_info *pi = vsi->port_info; + struct ice_port_info *pi = ice_vf_get_port_info(vf); u16 max_frame_size; max_frame_size = pi->phy.link_info.max_frame_size; @@ -1978,7 +2319,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; @@ -1996,6 +2337,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; @@ -2017,6 +2361,12 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF; + + if 
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO; + vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; @@ -2034,6 +2384,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) /* match guest capabilities */ vf->driver_caps = vfres->vf_cap_flags; + ice_vc_set_caps_allowlist(vf); + ice_vc_set_working_allowlist(vf); + set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); err: @@ -2084,7 +2437,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) * * check for the valid VSI ID */ -static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; @@ -2125,6 +2478,222 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len) } /** + * ice_vc_parse_rss_cfg - parses hash fields and headers from + * a specific virtchnl RSS cfg + * @hw: pointer to the hardware + * @rss_cfg: pointer to the virtchnl RSS cfg + * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*) + * to configure + * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure + * + * Return true if all the protocol header and hash fields in the RSS cfg could + * be parsed, else return false + * + * This function parses the virtchnl RSS cfg to be the intended + * hash fields and the intended header for RSS configuration + */ +static bool +ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, + u32 *addl_hdrs, u64 *hash_flds) +{ + const struct ice_vc_hash_field_match_type *hf_list; + const struct ice_vc_hdr_match_type *hdr_list; + int i, hf_list_len, hdr_list_len; + + if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", + sizeof(hw->active_pkg_name))) { + hf_list = ice_vc_hash_field_list_comms; + hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms); + hdr_list = ice_vc_hdr_list_comms; + hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms); + } else { + hf_list = ice_vc_hash_field_list_os; + hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os); + hdr_list = ice_vc_hdr_list_os; + hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os); + } + + for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { + struct virtchnl_proto_hdr *proto_hdr = + &rss_cfg->proto_hdrs.proto_hdr[i]; + bool hdr_found = false; + int j; + + /* Find matched ice headers according to virtchnl headers. */ + for (j = 0; j < hdr_list_len; j++) { + struct ice_vc_hdr_match_type hdr_map = hdr_list[j]; + + if (proto_hdr->type == hdr_map.vc_hdr) { + *addl_hdrs |= hdr_map.ice_hdr; + hdr_found = true; + } + } + + if (!hdr_found) + return false; + + /* Find matched ice hash fields according to + * virtchnl hash fields. 
+ */ + for (j = 0; j < hf_list_len; j++) { + struct ice_vc_hash_field_match_type hf_map = hf_list[j]; + + if (proto_hdr->type == hf_map.vc_hdr && + proto_hdr->field_selector == hf_map.vc_hash_field) { + *hash_flds |= hf_map.ice_hash_field; + break; + } + } + } + + return true; +} + +/** + * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced + * RSS offloads + * @caps: VF driver negotiated capabilities + * + * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set, + * else return false + */ +static bool ice_vf_adv_rss_offload_ena(u32 caps) +{ + return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF); +} + +/** + * ice_vc_handle_rss_cfg + * @vf: pointer to the VF info + * @msg: pointer to the message buffer + * @add: add a RSS config if true, otherwise delete a RSS config + * + * This function adds/deletes a RSS config + */ +static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) +{ + u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG; + struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_hw *hw = &vf->pf->hw; + struct ice_vsi *vsi; + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto error_param; + } + + if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) { + dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS || + rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC || + rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) { + dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { + struct ice_vsi_ctx *ctx; + enum ice_status status; + u8 lut_type, hash_type; + + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; + hash_type = add ? 
ICE_AQ_VSI_Q_OPT_RSS_XOR : + ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto error_param; + } + + ctx->info.q_opt_rss = ((lut_type << + ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & + ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | + (hash_type & + ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + + /* Preserve existing queueing option setting */ + ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & + ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); + ctx->info.q_opt_tc = vsi->info.q_opt_tc; + ctx->info.q_opt_flags = vsi->info.q_opt_rss; + + ctx->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + + status = ice_update_vsi(hw, vsi->idx, ctx, NULL); + if (status) { + dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } else { + vsi->info.q_opt_rss = ctx->info.q_opt_rss; + } + + kfree(ctx); + } else { + u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + u64 hash_flds = ICE_HASH_INVALID; + + if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, + &hash_flds)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (add) { + if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, + addl_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", + vsi->vsi_num, v_ret); + } + } else { + enum ice_status status; + + status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, + addl_hdrs); + /* We just ignore ICE_ERR_DOES_NOT_EXIST, because + * if two configurations share the same profile remove + * one of them actually removes both, since the + * profile is deleted. + */ + if (status && status != ICE_ERR_DOES_NOT_EXIST) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n", + vf->vf_id, ice_stat_str(status)); + } + } + } + +error_param: + return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0); +} + +/** * ice_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2136,7 +2705,6 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -2159,13 +2727,13 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (ice_set_rss(vsi, vrk->key, NULL, 0)) + if (ice_set_rss_key(vsi, vrk->key)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret, @@ -2183,7 +2751,6 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -2206,13 +2773,13 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) + if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: return 
ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, @@ -2289,7 +2856,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) if (ret) return ret; - vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) { netdev_err(netdev, "VSI %d for VF %d is null\n", vf->lan_vsi_idx, vf->vf_id); @@ -2394,7 +2961,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2530,7 +3097,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct ice_eth_stats stats = { 0 }; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -2543,7 +3109,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2633,7 +3199,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; unsigned long q_map; u16 vf_q_id; @@ -2653,7 +3218,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2685,7 +3250,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) set_bit(vf_q_id, vf->rxq_ena); } - vsi = pf->vsi[vf->lan_vsi_idx]; q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { @@ -2724,7 +3288,6 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; unsigned long q_map; u16 vf_q_id; @@ -2745,7 +3308,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2910,7 +3473,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2987,7 +3550,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3222,7 +3785,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) goto handle_mac_exit; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; @@ -3454,7 +4017,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) } hw = &pf->hw; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3621,7 +4184,6 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) { enum virtchnl_status_code v_ret = 
VIRTCHNL_STATUS_SUCCESS; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -3634,7 +4196,7 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (ice_vsi_manage_vlan_stripping(vsi, true)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -3652,7 +4214,6 @@ error_param: static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -3665,7 +4226,7 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3691,7 +4252,7 @@ error_param: */ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) { - struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct ice_vsi *vsi = ice_get_vf_vsi(vf); if (!vsi) return -EINVAL; @@ -3747,6 +4308,13 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EINVAL; } + if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { + ice_vc_send_msg_to_vf(vf, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, + 0); + return; + } + error_handler: if (err) { ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, @@ -3816,6 +4384,18 @@ error_handler: case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: err = ice_vc_dis_vlan_stripping(vf); break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: + err = ice_vc_add_fdir_fltr(vf, msg); + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: + err = ice_vc_del_fdir_fltr(vf, msg); + break; + case VIRTCHNL_OP_ADD_RSS_CFG: + err = ice_vc_handle_rss_cfg(vf, msg, true); + break; + case VIRTCHNL_OP_DEL_RSS_CFG: + err = ice_vc_handle_rss_cfg(vf, msg, false); + break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, @@ -4066,7 +4646,7 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, if (ret) return ret; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) return -EINVAL; @@ -4108,7 +4688,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) } /** - * ice_print_vfs_mdd_event - print VFs malicious driver detect event + * ice_print_vfs_mdd_events - print VFs malicious driver detect event * @pf: pointer to the PF structure * * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. 
@@ -4120,7 +4700,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) int i; /* check that there are pending MDD events to print */ - if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state)) + if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) return; /* VF MDD event logs are rate limited to one second intervals */ @@ -4160,7 +4740,6 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) */ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { - struct pci_dev *vfdev; u16 vf_id; int pos; @@ -4169,6 +4748,8 @@ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (pos) { + struct pci_dev *vfdev; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); vfdev = pci_get_device(pdev->vendor, vf_id, NULL); @@ -4180,3 +4761,70 @@ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) } } } + +/** + * ice_is_malicious_vf - helper function to detect a malicious VF + * @pf: ptr to struct ice_pf + * @event: pointer to the AQ event + * @num_msg_proc: the number of messages processed so far + * @num_msg_pending: the number of messages pending in the admin queue + */ +bool +ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, + u16 num_msg_proc, u16 num_msg_pending) +{ + s16 vf_id = le16_to_cpu(event->desc.retval); + struct device *dev = ice_pf_to_dev(pf); + struct ice_mbx_data mbxdata; + enum ice_status status; + bool malvf = false; + struct ice_vf *vf; + + if (ice_validate_vf_id(pf, vf_id)) + return false; + + vf = &pf->vf[vf_id]; + /* Check if VF is disabled. */ + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) + return false; + + mbxdata.num_msg_proc = num_msg_proc; + mbxdata.num_pending_arq = num_msg_pending; + mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; +#define ICE_MBX_OVERFLOW_WATERMARK 64 + mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; + + /* check to see if we have a malicious VF */ + status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); + if (status) + return false; + + if (malvf) { + bool report_vf = false; + + /* if the VF is malicious and we haven't let the user + * know about it, then let them know now + */ + status = ice_mbx_report_malvf(&pf->hw, pf->malvfs, + ICE_MAX_VF_COUNT, vf_id, + &report_vf); + if (status) + dev_dbg(dev, "Error reporting malicious VF\n"); + + if (report_vf) { + struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); + + if (pf_vsi) + dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue.
Please see the Adapter User Guide for more information\n", + &vf->dflt_lan_addr.addr[0], + pf_vsi->netdev->dev_addr); + } + + return true; + } + + /* if there was an error in detection or the VF is not malicious then + * return false + */ + return false; +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 0f519fba3770..d800ed83d6c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -4,6 +4,7 @@ #ifndef _ICE_VIRTCHNL_PF_H_ #define _ICE_VIRTCHNL_PF_H_ #include "ice.h" +#include "ice_virtchnl_fdir.h" /* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ #define ICE_MAX_VLAN_PER_VF 8 @@ -70,6 +71,8 @@ struct ice_vf { u16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ + u16 ctrl_vsi_idx; + struct ice_vf_fdir fdir; /* first vector index of this VF in the PF space */ int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ @@ -100,6 +103,7 @@ struct ice_vf { u16 num_vf_qs; /* num of queue configured per VF */ struct ice_mdd_vf_events mdd_rx_events; struct ice_mdd_vf_events mdd_tx_events; + DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); }; #ifdef CONFIG_PCI_IOV @@ -116,6 +120,9 @@ void ice_vc_notify_reset(struct ice_pf *pf); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); +bool +ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, + u16 num_msg_proc, u16 num_msg_pending); int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -138,6 +145,11 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); +int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen); +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) @@ -151,6 +163,15 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf); #define ice_restore_all_vfs_msi_state(pdev) do {} while (0) static inline bool +ice_is_malicious_vf(struct ice_pf __always_unused *pf, + struct ice_rq_event_info __always_unused *event, + u16 __always_unused num_msg_proc, + u16 __always_unused num_msg_pending) +{ + return false; +} + +static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, bool __always_unused is_vflr) { diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 9f94d9159acd..faa7b8d96adb 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -108,9 +108,6 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) ice_cfg_itr(hw, q_vector); - wr32(hw, GLINT_RATE(reg_idx), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); - ice_for_each_ring(ring, q_vector->tx) ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx, q_vector->tx.itr_idx); @@ -159,7 +156,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) rx_ring = vsi->rx_rings[q_idx]; q_vector = rx_ring->q_vector; - while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, 
vsi->state)) { timeout--; if (!timeout) return -EBUSY; @@ -249,7 +246,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (err) goto free_buf; - clear_bit(__ICE_CFG_BUSY, vsi->state); + clear_bit(ICE_CFG_BUSY, vsi->state); ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); @@ -473,6 +470,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); + + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; + rcu_read_unlock(); + return result; + } + switch (act) { case XDP_PASS: break; @@ -480,10 +485,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; result = ice_xmit_xdp_buff(xdp, xdp_ring); break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; - break; default: bpf_warn_invalid_xdp_action(act); fallthrough; @@ -754,7 +755,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_ring *ring; - if (test_bit(__ICE_DOWN, vsi->state)) + if (test_bit(ICE_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index d2e2c50ce257..ca5429774994 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -340,10 +340,10 @@ #define I210_RXPBSIZE_PB_32KB 0x00000020 #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ #define I210_TXPBSIZE_MASK 0xC0FFFFFF -#define I210_TXPBSIZE_PB0_8KB (8 << 0) -#define I210_TXPBSIZE_PB1_8KB (8 << 6) -#define I210_TXPBSIZE_PB2_4KB (4 << 12) -#define I210_TXPBSIZE_PB3_4KB (4 << 18) +#define I210_TXPBSIZE_PB0_6KB (6 << 0) +#define I210_TXPBSIZE_PB1_6KB (6 << 6) +#define I210_TXPBSIZE_PB2_6KB (6 << 12) +#define I210_TXPBSIZE_PB3_6KB (6 << 18) #define I210_DTXMXPKTSZ_DEFAULT 0x00000098 diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index fd8eb2f9ab9d..e63ee3cca5ea 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -484,6 +484,31 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) } /** + * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X + * @hw: pointer to the HW structure + * + * Checks if the multicast array was written correctly + * If not, the register is written again + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + break; + } + } + } while (is_failed); +} + +/** * igb_update_mc_addr_list - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program @@ -516,6 +541,8 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); } /** diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 33cceb77e960..29383112bc19 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -441,7 +441,7 @@ out_no_read: } /** - * e1000_init_mbx_params_pf - set initial values for pf mailbox + * igb_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for pf mailbox diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 8c8eb82e6272..a018000f7db9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -836,6 +836,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) break; case e1000_ms_auto: data &= ~CR_1000T_MS_ENABLE; + break; default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 28baf203459a..7545da216d8b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2347,35 +2347,23 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) IGB_TEST_LEN*ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - memcpy(p, igb_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { - memcpy(p, igb_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_stats[i].stat_string); + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_restart", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); } for (i = 0; i < adapter->num_rx_queues; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_drops", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_csum_err", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_alloc_failed", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); } /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ break; @@ -3022,6 +3010,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) break; case ETHTOOL_SRXCLSRLDEL: ret = igb_del_ethtool_nfc_entry(adapter, cmd); + break; default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a45cd2b416c8..038a9fd1af44 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1921,8 +1921,8 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) */ val = rd32(E1000_TXPBS); val &= ~I210_TXPBSIZE_MASK; - val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | - I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; + val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB | + I210_TXPBSIZE_PB2_6KB | 
I210_TXPBSIZE_PB3_6KB; wr32(E1000_TXPBS, val); val = rd32(E1000_RXPBS); @@ -2037,7 +2037,7 @@ static void igb_power_down_link(struct igb_adapter *adapter) } /** - * Detect and switch function for Media Auto Sense + * igb_check_swap_media - Detect and switch function for Media Auto Sense * @adapter: address of the board private structure **/ static void igb_check_swap_media(struct igb_adapter *adapter) @@ -2934,7 +2934,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n, int cpu = smp_processor_id(); struct igb_ring *tx_ring; struct netdev_queue *nq; - int drops = 0; + int nxmit = 0; int i; if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) @@ -2961,10 +2961,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n, int err; err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); - if (err != IGB_XDP_TX) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err != IGB_XDP_TX) + break; + nxmit++; } __netif_tx_unlock(nq); @@ -2972,7 +2971,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n, if (unlikely(flags & XDP_XMIT_FLUSH)) igb_xdp_ring_update_tail(tx_ring); - return n - drops; + return nxmit; } static const struct net_device_ops igb_netdev_ops = { @@ -3115,7 +3114,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter) return 0; /* Initialize the i2c bus which is controlled by the registers. - * This bus will use the i2c_algo_bit structue that implements + * This bus will use the i2c_algo_bit structure that implements * the protocol through toggling of the 4 bits in the register. */ adapter->i2c_adap.owner = THIS_MODULE; @@ -4020,7 +4019,7 @@ static int igb_sw_init(struct igb_adapter *adapter) } /** - * igb_open - Called when a network interface is made active + * __igb_open - Called when a network interface is made active * @netdev: network interface device structure * @resuming: indicates whether we are in a resume call * @@ -4138,7 +4137,7 @@ int igb_open(struct net_device *netdev) } /** - * igb_close - Disables a network interface + * __igb_close - Disables a network interface * @netdev: network interface device structure * @suspending: indicates we are in a suspend call * @@ -5856,7 +5855,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, */ if (tx_ring->launchtime_enable) { ts = ktime_to_timespec64(first->skb->tstamp); - first->skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(first->skb); context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 86a576201f5f..ba61fe9bfaf4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1025,6 +1025,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; + break; case HWTSTAMP_TX_ON: break; default: diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 1c3051db9085..95d1e8c490a4 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_IGC) += igc.o igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ -igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o +igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 1b08a7dc7bc4..25871351730b 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ 
b/drivers/net/ethernet/intel/igc/igc.h @@ -28,6 +28,11 @@ void igc_ethtool_set_ops(struct net_device *); #define MAX_ETYPE_FILTER 8 #define IGC_RETA_SIZE 128 +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + enum igc_mac_filter_type { IGC_MAC_FILTER_TYPE_DST = 0, IGC_MAC_FILTER_TYPE_SRC @@ -111,6 +116,8 @@ struct igc_ring { struct sk_buff *skb; }; }; + + struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; /* Board specific private data structure */ @@ -219,6 +226,16 @@ struct igc_adapter { ktime_t ptp_reset_start; /* Reset time in clock mono */ char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; }; void igc_up(struct igc_adapter *adapter); @@ -373,6 +390,8 @@ enum igc_tx_flags { /* olinfo flags */ IGC_TX_FLAGS_IPV4 = 0x10, IGC_TX_FLAGS_CSUM = 0x20, + + IGC_TX_FLAGS_XDP = 0x100, }; enum igc_boards { @@ -395,7 +414,10 @@ enum igc_boards { struct igc_tx_buffer { union igc_adv_tx_desc *next_to_watch; unsigned long time_stamp; - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; unsigned int bytecount; u16 gso_segs; __be16 protocol; @@ -504,6 +526,10 @@ enum igc_ring_flags_t { #define ring_uses_large_buffer(ring) \ test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) #define ring_uses_build_skb(ring) \ test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) @@ -547,8 +573,7 @@ void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); void igc_ptp_suspend(struct igc_adapter *adapter); void igc_ptp_stop(struct igc_adapter *adapter); -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, - struct sk_buff *skb); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); void igc_ptp_tx_hang(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index b909f00a79e6..0103dda32f39 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -8,6 +8,8 @@ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ /* Definitions for power management and wakeup registers */ @@ -96,6 +98,9 @@ #define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ #define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ #define MAX_JUMBO_FRAME_SIZE 0x2600 @@ -403,6 +408,64 @@ #define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ #define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select 
SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. 
*/ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */ + /* Transmit Scheduling */ #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 #define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 @@ -441,11 +504,6 @@ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 8722294ab90c..9722449d7633 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -65,6 +65,8 @@ static const struct igc_stats igc_gstrings_stats[] = { IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), }; #define IGC_NETDEV_STAT(_net_stat) { \ diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 7ec04e48860c..b2ef9fde97b3 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -6,7 +6,7 @@ #include "igc_hw.h" /** - * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM * @hw: pointer to the HW structure * * Acquire the necessary semaphores for exclusive access to the EEPROM. 
@@ -229,10 +229,11 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || words == 0) { hw_dbg("nvm parameter(s) out of bounds\n"); - goto out; + return ret_val; } for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | (data[i] << IGC_NVM_RW_REG_DATA) | IGC_NVM_RW_REG_START; @@ -254,7 +255,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, } } -out: return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 4d989ebc9713..069471b7ffb0 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -10,17 +10,24 @@ #include <linux/ip.h> #include <linux/pm_runtime.h> #include <net/pkt_sched.h> +#include <linux/bpf_trace.h> #include <net/ipv6.h> #include "igc.h" #include "igc_hw.h" #include "igc_tsn.h" +#include "igc_xdp.h" #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + static int debug = -1; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); @@ -176,8 +183,10 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) while (i != tx_ring->next_to_use) { union igc_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP) + xdp_return_frame(tx_buffer->xdpf); + else + dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -375,6 +384,8 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring) i = 0; } + clear_ring_uses_large_buffer(rx_ring); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -403,6 +414,8 @@ void igc_free_rx_resources(struct igc_ring *rx_ring) { igc_clean_rx_ring(rx_ring); + igc_xdp_unregister_rxq_info(rx_ring); + vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -440,7 +453,11 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring) { struct net_device *ndev = rx_ring->netdev; struct device *dev = rx_ring->dev; - int size, desc_len; + int size, desc_len, res; + + res = igc_xdp_register_rxq_info(rx_ring); + if (res < 0) + return res; size = sizeof(struct igc_rx_buffer) * rx_ring->count; rx_ring->rx_buffer_info = vzalloc(size); @@ -466,6 +483,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring) return 0; err: + igc_xdp_unregister_rxq_info(rx_ring); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); @@ -497,6 +515,11 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter) return err; } +static bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + /** * igc_configure_rx_ring - Configure a receive ring after Reset * @adapter: board private structure @@ -513,6 +536,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter, u32 srrctl = 0, rxdctl = 0; u64 rdba = ring->dma; + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + /* disable the queue */ wr32(IGC_RXDCTL(reg_idx), 0); @@ -941,7 +967,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); ktime_t txtime = first->skb->tstamp; - 
first->skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(first->skb); context_desc->launch_time = igc_tx_launchtime(adapter, txtime); } else { @@ -1029,7 +1055,7 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) -static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +static u32 igc_tx_cmd_type(u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ u32 cmd_type = IGC_ADVTXD_DTYP_DATA | @@ -1078,7 +1104,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, u16 i = tx_ring->next_to_use; unsigned int data_len, size; dma_addr_t dma; - u32 cmd_type = igc_tx_cmd_type(skb, tx_flags); + u32 cmd_type = igc_tx_cmd_type(tx_flags); tx_desc = IGC_TX_DESC(tx_ring, i); @@ -1480,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring, } static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, - const unsigned int size) + const unsigned int size, + int *rx_buffer_pgcnt) { struct igc_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetchw(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ @@ -1499,6 +1532,32 @@ static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, return rx_buffer; } +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + /** * igc_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1513,20 +1572,19 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring, struct sk_buff *skb, unsigned int size) { -#if (PAGE_SIZE < 8192) - unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; + unsigned int truesize; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); - rx_buffer->page_offset ^= truesize; +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ring_uses_build_skb(rx_ring) ? - SKB_DATA_ALIGN(IGC_SKB_PAD + size) : - SKB_DATA_ALIGN(size); + truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); - rx_buffer->page_offset += truesize; -#endif + + igc_rx_buffer_flip(rx_buffer, truesize); } static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, @@ -1535,12 +1593,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, unsigned int size) { void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; -#if (PAGE_SIZE < 8192) - unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IGC_SKB_PAD + size); -#endif + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); struct sk_buff *skb; /* prefetch first cache line of first page */ @@ -1555,27 +1608,18 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, skb_reserve(skb, IGC_SKB_PAD); __skb_put(skb, size); - /* update buffer offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - + igc_rx_buffer_flip(rx_buffer, truesize); return skb; } static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - union igc_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp, + ktime_t timestamp) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; -#if (PAGE_SIZE < 8192) - unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size); -#endif + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; unsigned int headlen; struct sk_buff *skb; @@ -1587,11 +1631,8 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, if (unlikely(!skb)) return NULL; - if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) { - igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - va += IGC_TS_HDR_LEN; - size -= IGC_TS_HDR_LEN; - } + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; /* Determine available headroom for copy */ headlen = size; @@ -1607,11 +1648,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, skb_add_rx_frag(skb, 0, rx_buffer->page, (va + headlen) - page_address(rx_buffer->page), size, truesize); -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif + igc_rx_buffer_flip(rx_buffer, truesize); } else { rx_buffer->pagecnt_bias++; } @@ -1648,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring, new_buff->pagecnt_bias = old_buff->pagecnt_bias; } -static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -1659,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) return false; #else #define IGC_LAST_OFFSET \ @@ -1673,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. 
*/ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -1726,6 +1764,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring, union igc_adv_rx_desc *rx_desc, struct sk_buff *skb) { + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { struct net_device *netdev = rx_ring->netdev; @@ -1743,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring, } static void igc_put_rx_buffer(struct igc_ring *rx_ring, - struct igc_rx_buffer *rx_buffer) + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) { - if (igc_can_reuse_rx_page(rx_buffer)) { + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { /* hand second half of page back to the ring */ igc_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -1765,7 +1808,14 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring, static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) { - return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0; + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; } static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, @@ -1804,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = igc_rx_offset(rx_ring); - bi->pagecnt_bias = 1; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; return true; } @@ -1879,17 +1930,196 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) } } +static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, + struct xdp_frame *xdpf, + struct igc_ring *ring) +{ + dma_addr_t dma; + + dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->xdpf = xdpf; + buffer->tx_flags = IGC_TX_FLAGS_XDP; + buffer->protocol = 0; + buffer->bytecount = xdpf->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, xdpf->len); + dma_unmap_addr_set(buffer, dma, dma); + return 0; +} + +/* This function requires __netif_tx_lock is held by the caller. 
*/ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct igc_tx_buffer *buffer; + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + buffer = &ring->tx_buffer_info[ring->next_to_use]; + err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + buffer->bytecount; + olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); + + netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); + + buffer->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + u32 act; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto unlock; + } + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + res = IGC_XDP_PASS; + break; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + res = IGC_XDP_CONSUMED; + else + res = IGC_XDP_TX; + break; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + res = IGC_XDP_CONSUMED; + else + res = IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + res = IGC_XDP_CONSUMED; + break; + } + +unlock: + rcu_read_unlock(); + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. 
+ */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) { unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; struct igc_ring *rx_ring = q_vector->rx.ring; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; while (likely(total_packets < budget)) { union igc_adv_rx_desc *rx_desc; struct igc_rx_buffer *rx_buffer; - unsigned int size; + unsigned int size, truesize; + ktime_t timestamp = 0; + struct xdp_buff xdp; + int pkt_offset = 0; + void *pktbuf; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IGC_RX_BUFFER_WRITE) { @@ -1908,16 +2138,52 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) */ dma_rmb(); - rx_buffer = igc_get_rx_buffer(rx_ring, size); + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; - /* retrieve a buffer from the ring */ - if (skb) + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp.data = pktbuf + pkt_offset; + xdp.data_end = xdp.data + size; + xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring); + xdp_set_data_meta_invalid(&xdp); + xdp.frame_sz = truesize; + xdp.rxq = &rx_ring->xdp_rxq; + + skb = igc_xdp_run_prog(adapter, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } else if (skb) igc_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); else - skb = igc_construct_skb(rx_ring, rx_buffer, - rx_desc, size); + skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, + timestamp); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1926,7 +2192,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) break; } - igc_put_rx_buffer(rx_ring, rx_buffer); + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); cleaned_count++; /* fetch next buffer in frame if non-eop */ @@ -1954,6 +2220,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) total_packets++; } + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; @@ -2015,8 +2284,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; - /* free the skb */ - napi_consume_skb(tx_buffer->skb, napi_budget); + if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP) + 
xdp_return_frame(tx_buffer->xdpf); + else + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -3580,7 +3851,7 @@ void igc_up(struct igc_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. */ - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); } @@ -3858,6 +4129,11 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct igc_adapter *adapter = netdev_priv(netdev); + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + /* adjust max frame to be at least the size of a standard frame */ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; @@ -3974,9 +4250,20 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev, static void igc_tsync_interrupt(struct igc_adapter *adapter) { + u32 ack, tsauxc, sec, nsec, tsicr; struct igc_hw *hw = &adapter->hw; - u32 tsicr = rd32(IGC_TSICR); - u32 ack = 0; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } if (tsicr & IGC_TSICR_TXTS) { /* retrieve hardware timestamp */ @@ -3984,6 +4271,54 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) ack |= IGC_TSICR_TXTS; } + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + /* acknowledge the interrupts */ wr32(IGC_TSICR, ack); } @@ -4009,7 +4344,7 @@ static irqreturn_t igc_msix_other(int irq, void *data) } if (icr & IGC_ICR_LSC) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -4387,7 +4722,7 @@ static irqreturn_t igc_intr_msi(int irq, void *data) } if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { - hw->mac.get_link_status = 1; + 
hw->mac.get_link_status = true; if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } @@ -4429,7 +4764,7 @@ static irqreturn_t igc_intr(int irq, void *data) } if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -4583,7 +4918,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) netif_tx_start_all_queues(netdev); /* start the watchdog. */ - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); return IGC_SUCCESS; @@ -4844,6 +5179,58 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + static const struct net_device_ops igc_netdev_ops = { .ndo_open = igc_open, .ndo_stop = igc_close, @@ -4857,6 +5244,8 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_features_check = igc_features_check, .ndo_do_ioctl = igc_ioctl, .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, }; /* PCIe configuration access */ @@ -4924,7 +5313,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) { struct igc_mac_info *mac = &adapter->hw.mac; - mac->autoneg = 0; + mac->autoneg = false; /* Make sure dplx is at most 1 bit and lsb of speed is not set * for the switch() below to work @@ -4946,13 +5335,13 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = 1; + mac->autoneg = true; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ goto err_inval; case SPEED_2500 + DUPLEX_FULL: - mac->autoneg = 1; + mac->autoneg = true; adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; break; case SPEED_2500 + DUPLEX_HALF: /* not supported */ diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 545f4d0e67cf..69617d2c1be2 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -120,12 +120,289 @@ static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, return 0; } +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin 
< 2 ? ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + /* For now, always 
select timer 0 as source. */ + wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + return -EOPNOTSUPP; } +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + /** * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp * @adapter: board private structure @@ -153,20 +430,20 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, /** * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer - * @q_vector: Pointer to interrupt specific structure - * @va: Pointer to address containing Rx buffer - * @skb: Buffer containing timestamp and packet + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer * * This function retrieves the timestamp saved in the beginning of packet * buffer. While two timestamps are available, one in timer0 reference and the * other in timer1 reference, this function considers only the timestamp in * timer0 reference. + * + * Returns timestamp value. */ -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, - struct sk_buff *skb) +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) { - struct igc_adapter *adapter = q_vector->adapter; - u64 regval; + ktime_t timestamp; + u32 secs, nsecs; int adjust; /* Timestamps are saved in little endian at the beginning of the packet @@ -178,9 +455,10 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds * part of the timestamp. 
*/ - regval = le32_to_cpu(va[2]); - regval |= (u64)le32_to_cpu(va[3]) << 32; - igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); /* Adjust timestamp for the RX latency based on link speed */ switch (adapter->link_speed) { @@ -201,8 +479,8 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); break; } - skb_hwtstamps(skb)->hwtstamp = - ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + return ktime_sub_ns(timestamp, adjust); } static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) @@ -485,9 +763,17 @@ void igc_ptp_init(struct igc_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct igc_hw *hw = &adapter->hw; + int i; switch (hw->mac.type) { case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 62499999; @@ -496,6 +782,12 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; adapter->ptp_caps.settime64 = igc_ptp_settime_i225; adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; break; default: adapter->ptp_clock = NULL; @@ -597,7 +889,9 @@ void igc_ptp_reset(struct igc_adapter *adapter) case igc_i225: wr32(IGC_TSAUXC, 0x0); wr32(IGC_TSSDP, 0x0); - wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? 
IGC_TSICR_SYS_WRAP : 0)); wr32(IGC_IMS, IGC_IMS_TS); break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 3e5cb7aef9da..cc174853554b 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -192,6 +192,16 @@ #define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ #define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ #define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ #define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ #define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c new file mode 100644 index 000000000000..11133c4619bb --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include "igc.h" +#include "igc_xdp.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (if_running) + igc_open(dev); + + return 0; +} + +int igc_xdp_register_rxq_info(struct igc_ring *ring) +{ + struct net_device *dev = ring->netdev; + int err; + + err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0); + if (err) { + netdev_err(dev, "Failed to register xdp rxq info\n"); + return err; + } + + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, + NULL); + if (err) { + netdev_err(dev, "Failed to register xdp rxq mem model\n"); + xdp_rxq_info_unreg(&ring->xdp_rxq); + return err; + } + + return 0; +} + +void igc_xdp_unregister_rxq_info(struct igc_ring *ring) +{ + xdp_rxq_info_unreg(&ring->xdp_rxq); +} diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h new file mode 100644 index 000000000000..cfecb515b718 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_xdp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); + +int igc_xdp_register_rxq_info(struct igc_ring *ring); +void igc_xdp_unregister_rxq_info(struct igc_ring *ring); + +#endif /* _IGC_XDP_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 8d3798a32f0e..e324e42fab2d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1351,7 +1351,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, } /** - * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter * @hw: pointer to hardware structure * @input: unique input dword * @common: compressed common input dword @@ -1542,6 +1542,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, switch (input_mask->formatted.vm_pool & 0x7F) { case 0x0: fdirm |= IXGBE_FDIRM_POOL; + break; case 0x7F: break; default: @@ -1557,6 +1558,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, hw_dbg(hw, " Error on src/dst port mask\n"); return IXGBE_ERR_CONFIG; } + break; case IXGBE_ATR_L4TYPE_MASK: break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 62ddb452f862..03ccbe6b66d2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -93,6 +93,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) default: break; } + break; default: break; } @@ -2707,7 +2708,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) } /** - * ixgbe_enable_rx_buff - Enables the receive data path + * ixgbe_enable_rx_buff_generic - Enables the receive data path * @hw: pointer to hardware structure * * Enables the receive data path @@ -3029,14 +3030,14 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) } /** + * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + * * This function should only be involved in the IOV mode. * In IOV mode, Default pool is next pool after the number of * VFs advertized and not 0. 
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] - * - * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { @@ -3896,7 +3897,7 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, } /** - * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data * @hw: pointer to hardware structure * * Returns the thermal sensor data structure @@ -4054,8 +4055,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw, } /** - * ixgbe_get_oem_prod_version Etrack ID from EEPROM - * + * ixgbe_get_oem_prod_version - Etrack ID from EEPROM * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index c00332d2e02a..72e6ebffea33 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -361,7 +361,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } #ifdef IXGBE_FCOE - /* Reprogam FCoE hardware offloads when the traffic class + /* Reprogram FCoE hardware offloads when the traffic class * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index a280aa34ca1d..4ceaca0f6ce3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1368,45 +1368,33 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - char *p = (char *)data; unsigned int i; + u8 *p = data; switch (stringset) { case ETH_SS_TEST: - for (i = 0; i < IXGBE_TEST_LEN; i++) { - memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < IXGBE_TEST_LEN; i++) + ethtool_sprintf(&p, ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "tx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_pb_%u_pxon", i); + ethtool_sprintf(&p, "tx_pb_%u_pxoff", i); } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "rx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_pb_%u_pxon", i); + ethtool_sprintf(&p, "rx_pb_%u_pxoff", i); } /* BUG_ON(p - data != 
IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index df389a11d3af..0218f6c9b925 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -132,6 +132,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, else *tx = (tc + 4) << 4; /* 96, 112 */ } + break; default: break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cffb95f8f632..c5ec17d19c59 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -225,7 +225,7 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) } /** - * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent * @hw: hw specific details * * This function is used by probe to determine whether a device's PCI-Express @@ -6158,7 +6158,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /** - * ixgbe_eee_capable - helper function to determine EEE support on X550 + * ixgbe_set_eee_capable - helper function to determine EEE support on X550 * @adapter: board private structure */ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) @@ -10201,7 +10201,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; - int drops = 0; + int nxmit = 0; int i; if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) @@ -10225,16 +10225,15 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, int err; err = ixgbe_xmit_xdp_ring(adapter, xdpf); - if (err != IXGBE_XDP_TX) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err != IXGBE_XDP_TX) + break; + nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) ixgbe_xdp_ring_update_tail(ring); - return n - drops; + return nxmit; } static const struct net_device_ops ixgbe_netdev_ops = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fc389eecdd2b..24aa97f993ca 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -380,6 +380,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case X557_PHY_ID2: phy_type = ixgbe_phy_x550em_ext_t; break; + case BCM54616S_E_PHY_ID: + phy_type = ixgbe_phy_ext_1g_t; + break; default: phy_type = ixgbe_phy_unknown; break; @@ -461,12 +464,13 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) } /** - * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without - * the SWFW lock + * ixgbe_read_phy_reg_mdi - read PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register without the SWFW lock **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 22a874eee2e8..23ddfd79fc8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -999,6 +999,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl 
= 0; + break; case HWTSTAMP_TX_ON: break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2be1c4c72435..2647937f7f4d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1407,6 +1407,7 @@ struct ixgbe_nvm_version { #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 #define AQ_FW_REV 0x20 +#define BCM54616S_E_PHY_ID 0x03625D10 /* Special PHY Init Routine */ #define IXGBE_PHY_INIT_OFFSET_NL 0x002B @@ -3383,10 +3384,6 @@ struct ixgbe_hw_stats { /* forward declaration */ struct ixgbe_hw; -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); - /* Function pointer table */ struct ixgbe_eeprom_operations { s32 (*init_params)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 4b93ba149ec5..d5cfb51ff648 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -701,7 +701,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) } /** - * ixgbe_release_nvm_semaphore - Release hardware semaphore + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 5e339afa682a..9724ffb16518 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1248,7 +1248,7 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) } /** - * ixgbe_fw_recovery_mode - Check FW NVM recovery mode + * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode * @hw: pointer t hardware structure * * Returns true if in FW NVM recovery mode. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 3771857cf887..91ad5b902673 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -104,6 +104,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; + rcu_read_unlock(); + return result; + } + switch (act) { case XDP_PASS: break; @@ -115,10 +122,6 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, } result = ixgbe_xmit_xdp_ring(adapter, xdpf); break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; - break; default: bpf_warn_invalid_xdp_action(act); fallthrough; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 449d7d5b280d..ba2ed8a43d2d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2633,6 +2633,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; adapter->num_xdp_queues = adapter->xdp_prog ? 
rss : 0; + break; default: break; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bfe6dfcec4ab..5fc347abab3c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -121,9 +121,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) } /** + * ixgbevf_hv_reset_hw_vf - reset via Hyper-V + * @hw: pointer to private hardware struct + * * Hyper-V variant; the VF/PF communication is through the PCI * config space. - * @hw: pointer to private hardware struct */ static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) { @@ -513,9 +515,11 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** - * Hyper-V variant - just a stub. + * ixgbevf_hv_update_mc_addr_list_vf - stub * @hw: unused * @netdev: unused + * + * Hyper-V variant - just a stub. */ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) @@ -564,9 +568,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) } /** - * Hyper-V variant - just a stub. + * ixgbevf_hv_update_xcast_mode - stub * @hw: unused * @xcast_mode: unused + * + * Hyper-V variant - just a stub. */ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { @@ -608,7 +614,7 @@ mbx_err: } /** - * Hyper-V variant - just a stub. + * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub. * @hw: unused * @vlan: unused * @vind: unused @@ -726,11 +732,13 @@ out: } /** - * Hyper-V variant; there is no mailbox communication. + * ixgbevf_hv_check_mac_link_vf - check link * @hw: pointer to private hardware struct * @speed: pointer to link speed * @link_up: true is link is up, false otherwise * @autoneg_wait_to_complete: unused + * + * Hyper-V variant; there is no mailbox communication. */ static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d1e9e306653b..1d8209df4162 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -16,9 +16,6 @@ struct ixgbe_hw; -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); struct ixgbe_mac_operations { s32 (*init_hw)(struct ixgbe_hw *); s32 (*reset_hw)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 925161959b9b..6f987a7ffcb3 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -41,7 +41,10 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/ioport.h> +#include <linux/iopoll.h> #include <linux/in.h> +#include <linux/of_device.h> +#include <linux/of_net.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> @@ -54,21 +57,246 @@ #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/pgtable.h> - -#include <asm/bootinfo.h> -#include <asm/bitops.h> -#include <asm/io.h> -#include <asm/dma.h> - -#include <asm/mach-rc32434/rb.h> -#include <asm/mach-rc32434/rc32434.h> -#include <asm/mach-rc32434/eth.h> -#include <asm/mach-rc32434/dma_v.h> +#include <linux/clk.h> #define DRV_NAME "korina" #define DRV_VERSION "0.20" #define DRV_RELDATE "15Sep2017" +struct eth_regs { + u32 ethintfc; + u32 ethfifott; + u32 etharc; + u32 ethhash0; + u32 ethhash1; + u32 ethu0[4]; /* Reserved. */ + u32 ethpfs; + u32 ethmcp; + u32 eth_u1[10]; /* Reserved. 
*/ + u32 ethspare; + u32 eth_u2[42]; /* Reserved. */ + u32 ethsal0; + u32 ethsah0; + u32 ethsal1; + u32 ethsah1; + u32 ethsal2; + u32 ethsah2; + u32 ethsal3; + u32 ethsah3; + u32 ethrbc; + u32 ethrpc; + u32 ethrupc; + u32 ethrfc; + u32 ethtbc; + u32 ethgpf; + u32 eth_u9[50]; /* Reserved. */ + u32 ethmac1; + u32 ethmac2; + u32 ethipgt; + u32 ethipgr; + u32 ethclrt; + u32 ethmaxf; + u32 eth_u10; /* Reserved. */ + u32 ethmtest; + u32 miimcfg; + u32 miimcmd; + u32 miimaddr; + u32 miimwtd; + u32 miimrdd; + u32 miimind; + u32 eth_u11; /* Reserved. */ + u32 eth_u12; /* Reserved. */ + u32 ethcfsa0; + u32 ethcfsa1; + u32 ethcfsa2; +}; + +/* Ethernet interrupt registers */ +#define ETH_INT_FC_EN BIT(0) +#define ETH_INT_FC_ITS BIT(1) +#define ETH_INT_FC_RIP BIT(2) +#define ETH_INT_FC_JAM BIT(3) +#define ETH_INT_FC_OVR BIT(4) +#define ETH_INT_FC_UND BIT(5) +#define ETH_INT_FC_IOC 0x000000c0 + +/* Ethernet FIFO registers */ +#define ETH_FIFI_TT_TTH_BIT 0 +#define ETH_FIFO_TT_TTH 0x0000007f + +/* Ethernet ARC/multicast registers */ +#define ETH_ARC_PRO BIT(0) +#define ETH_ARC_AM BIT(1) +#define ETH_ARC_AFM BIT(2) +#define ETH_ARC_AB BIT(3) + +/* Ethernet SAL registers */ +#define ETH_SAL_BYTE_5 0x000000ff +#define ETH_SAL_BYTE_4 0x0000ff00 +#define ETH_SAL_BYTE_3 0x00ff0000 +#define ETH_SAL_BYTE_2 0xff000000 + +/* Ethernet SAH registers */ +#define ETH_SAH_BYTE1 0x000000ff +#define ETH_SAH_BYTE0 0x0000ff00 + +/* Ethernet GPF register */ +#define ETH_GPF_PTV 0x0000ffff + +/* Ethernet PFG register */ +#define ETH_PFS_PFD BIT(0) + +/* Ethernet CFSA[0-3] registers */ +#define ETH_CFSA0_CFSA4 0x000000ff +#define ETH_CFSA0_CFSA5 0x0000ff00 +#define ETH_CFSA1_CFSA2 0x000000ff +#define ETH_CFSA1_CFSA3 0x0000ff00 +#define ETH_CFSA1_CFSA0 0x000000ff +#define ETH_CFSA1_CFSA1 0x0000ff00 + +/* Ethernet MAC1 registers */ +#define ETH_MAC1_RE BIT(0) +#define ETH_MAC1_PAF BIT(1) +#define ETH_MAC1_RFC BIT(2) +#define ETH_MAC1_TFC BIT(3) +#define ETH_MAC1_LB BIT(4) +#define ETH_MAC1_MR BIT(31) + +/* Ethernet MAC2 registers */ +#define ETH_MAC2_FD BIT(0) +#define ETH_MAC2_FLC BIT(1) +#define ETH_MAC2_HFE BIT(2) +#define ETH_MAC2_DC BIT(3) +#define ETH_MAC2_CEN BIT(4) +#define ETH_MAC2_PE BIT(5) +#define ETH_MAC2_VPE BIT(6) +#define ETH_MAC2_APE BIT(7) +#define ETH_MAC2_PPE BIT(8) +#define ETH_MAC2_LPE BIT(9) +#define ETH_MAC2_NB BIT(12) +#define ETH_MAC2_BP BIT(13) +#define ETH_MAC2_ED BIT(14) + +/* Ethernet IPGT register */ +#define ETH_IPGT 0x0000007f + +/* Ethernet IPGR registers */ +#define ETH_IPGR_IPGR2 0x0000007f +#define ETH_IPGR_IPGR1 0x00007f00 + +/* Ethernet CLRT registers */ +#define ETH_CLRT_MAX_RET 0x0000000f +#define ETH_CLRT_COL_WIN 0x00003f00 + +/* Ethernet MAXF register */ +#define ETH_MAXF 0x0000ffff + +/* Ethernet test registers */ +#define ETH_TEST_REG BIT(2) +#define ETH_MCP_DIV 0x000000ff + +/* MII registers */ +#define ETH_MII_CFG_RSVD 0x0000000c +#define ETH_MII_CMD_RD BIT(0) +#define ETH_MII_CMD_SCN BIT(1) +#define ETH_MII_REG_ADDR 0x0000001f +#define ETH_MII_PHY_ADDR 0x00001f00 +#define ETH_MII_WTD_DATA 0x0000ffff +#define ETH_MII_RDD_DATA 0x0000ffff +#define ETH_MII_IND_BSY BIT(0) +#define ETH_MII_IND_SCN BIT(1) +#define ETH_MII_IND_NV BIT(2) + +/* Values for the DEVCS field of the Ethernet DMA Rx and Tx descriptors. 
*/ +#define ETH_RX_FD BIT(0) +#define ETH_RX_LD BIT(1) +#define ETH_RX_ROK BIT(2) +#define ETH_RX_FM BIT(3) +#define ETH_RX_MP BIT(4) +#define ETH_RX_BP BIT(5) +#define ETH_RX_VLT BIT(6) +#define ETH_RX_CF BIT(7) +#define ETH_RX_OVR BIT(8) +#define ETH_RX_CRC BIT(9) +#define ETH_RX_CV BIT(10) +#define ETH_RX_DB BIT(11) +#define ETH_RX_LE BIT(12) +#define ETH_RX_LOR BIT(13) +#define ETH_RX_CES BIT(14) +#define ETH_RX_LEN_BIT 16 +#define ETH_RX_LEN 0xffff0000 + +#define ETH_TX_FD BIT(0) +#define ETH_TX_LD BIT(1) +#define ETH_TX_OEN BIT(2) +#define ETH_TX_PEN BIT(3) +#define ETH_TX_CEN BIT(4) +#define ETH_TX_HEN BIT(5) +#define ETH_TX_TOK BIT(6) +#define ETH_TX_MP BIT(7) +#define ETH_TX_BP BIT(8) +#define ETH_TX_UND BIT(9) +#define ETH_TX_OF BIT(10) +#define ETH_TX_ED BIT(11) +#define ETH_TX_EC BIT(12) +#define ETH_TX_LC BIT(13) +#define ETH_TX_TD BIT(14) +#define ETH_TX_CRC BIT(15) +#define ETH_TX_LE BIT(16) +#define ETH_TX_CC 0x001E0000 + +/* DMA descriptor (in physical memory). */ +struct dma_desc { + u32 control; /* Control. use DMAD_* */ + u32 ca; /* Current Address. */ + u32 devcs; /* Device control and status. */ + u32 link; /* Next descriptor in chain. */ +}; + +#define DMA_DESC_COUNT_BIT 0 +#define DMA_DESC_COUNT_MSK 0x0003ffff +#define DMA_DESC_DS_BIT 20 +#define DMA_DESC_DS_MSK 0x00300000 + +#define DMA_DESC_DEV_CMD_BIT 22 +#define DMA_DESC_DEV_CMD_MSK 0x01c00000 + +/* DMA descriptors interrupts */ +#define DMA_DESC_COF BIT(25) /* Chain on finished */ +#define DMA_DESC_COD BIT(26) /* Chain on done */ +#define DMA_DESC_IOF BIT(27) /* Interrupt on finished */ +#define DMA_DESC_IOD BIT(28) /* Interrupt on done */ +#define DMA_DESC_TERM BIT(29) /* Terminated */ +#define DMA_DESC_DONE BIT(30) /* Done */ +#define DMA_DESC_FINI BIT(31) /* Finished */ + +/* DMA register (within Internal Register Map). */ +struct dma_reg { + u32 dmac; /* Control. */ + u32 dmas; /* Status. */ + u32 dmasm; /* Mask. */ + u32 dmadptr; /* Descriptor pointer. */ + u32 dmandptr; /* Next descriptor pointer. */ +}; + +/* DMA channels specific registers */ +#define DMA_CHAN_RUN_BIT BIT(0) +#define DMA_CHAN_DONE_BIT BIT(1) +#define DMA_CHAN_MODE_BIT BIT(2) +#define DMA_CHAN_MODE_MSK 0x0000000c +#define DMA_CHAN_MODE_AUTO 0 +#define DMA_CHAN_MODE_BURST 1 +#define DMA_CHAN_MODE_XFRT 2 +#define DMA_CHAN_MODE_RSVD 3 +#define DMA_CHAN_ACT_BIT BIT(4) + +/* DMA status registers */ +#define DMA_STAT_FINI BIT(0) +#define DMA_STAT_DONE BIT(1) +#define DMA_STAT_CHAIN BIT(2) +#define DMA_STAT_ERR BIT(3) +#define DMA_STAT_HALT BIT(4) + #define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \ ((dev)->dev_addr[1])) #define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \ @@ -95,24 +323,30 @@ enum chain_status { desc_filled, - desc_empty + desc_is_empty }; +#define DMA_COUNT(count) ((count) & DMA_DESC_COUNT_MSK) #define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0) #define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0) #define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT) /* Information that need to be kept for each board. 
*/ struct korina_private { - struct eth_regs *eth_regs; - struct dma_reg *rx_dma_regs; - struct dma_reg *tx_dma_regs; + struct eth_regs __iomem *eth_regs; + struct dma_reg __iomem *rx_dma_regs; + struct dma_reg __iomem *tx_dma_regs; struct dma_desc *td_ring; /* transmit descriptor ring */ struct dma_desc *rd_ring; /* receive descriptor ring */ + dma_addr_t td_dma; + dma_addr_t rd_dma; struct sk_buff *tx_skb[KORINA_NUM_TDS]; struct sk_buff *rx_skb[KORINA_NUM_RDS]; + dma_addr_t rx_skb_dma[KORINA_NUM_RDS]; + dma_addr_t tx_skb_dma[KORINA_NUM_TDS]; + int rx_next_done; int rx_chain_head; int rx_chain_tail; @@ -137,15 +371,18 @@ struct korina_private { struct mii_if_info mii_if; struct work_struct restart_task; struct net_device *dev; - int phy_addr; + struct device *dmadev; + int mii_clock_freq; }; -extern unsigned int idt_cpu_freq; +static dma_addr_t korina_tx_dma(struct korina_private *lp, int idx) +{ + return lp->td_dma + (idx * sizeof(struct dma_desc)); +} -static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr) +static dma_addr_t korina_rx_dma(struct korina_private *lp, int idx) { - writel(0, &ch->dmandptr); - writel(dma_addr, &ch->dmadptr); + return lp->rd_dma + (idx * sizeof(struct dma_desc)); } static inline void korina_abort_dma(struct net_device *dev, @@ -164,11 +401,6 @@ static inline void korina_abort_dma(struct net_device *dev, writel(0, &ch->dmandptr); } -static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr) -{ - writel(dma_addr, &ch->dmandptr); -} - static void korina_abort_tx(struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); @@ -183,30 +415,21 @@ static void korina_abort_rx(struct net_device *dev) korina_abort_dma(dev, lp->rx_dma_regs); } -static void korina_start_rx(struct korina_private *lp, - struct dma_desc *rd) -{ - korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd)); -} - -static void korina_chain_rx(struct korina_private *lp, - struct dma_desc *rd) -{ - korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd)); -} - /* transmit packet */ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); - unsigned long flags; - u32 length; u32 chain_prev, chain_next; + unsigned long flags; struct dma_desc *td; + dma_addr_t ca; + u32 length; + int idx; spin_lock_irqsave(&lp->lock, flags); - td = &lp->td_ring[lp->tx_chain_tail]; + idx = lp->tx_chain_tail; + td = &lp->td_ring[idx]; /* stop queue when full, drop pkts if queue already full */ if (lp->tx_count >= (KORINA_NUM_TDS - 2)) { @@ -214,38 +437,37 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) if (lp->tx_count == (KORINA_NUM_TDS - 2)) netif_stop_queue(dev); - else { - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&lp->lock, flags); - - return NETDEV_TX_OK; - } + else + goto drop_packet; } lp->tx_count++; - lp->tx_skb[lp->tx_chain_tail] = skb; + lp->tx_skb[idx] = skb; length = skb->len; - dma_cache_wback((u32)skb->data, skb->len); /* Setup the transmit descriptor. 
*/ - dma_cache_inv((u32) td, sizeof(*td)); - td->ca = CPHYSADDR(skb->data); - chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK; - chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK; + ca = dma_map_single(lp->dmadev, skb->data, length, DMA_TO_DEVICE); + if (dma_mapping_error(lp->dmadev, ca)) + goto drop_packet; + + lp->tx_skb_dma[idx] = ca; + td->ca = ca; + + chain_prev = (idx - 1) & KORINA_TDS_MASK; + chain_next = (idx + 1) & KORINA_TDS_MASK; if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) { - if (lp->tx_chain_status == desc_empty) { + if (lp->tx_chain_status == desc_is_empty) { /* Update tail */ td->control = DMA_COUNT(length) | DMA_DESC_COF | DMA_DESC_IOF; /* Move tail */ lp->tx_chain_tail = chain_next; /* Write to NDPTR */ - writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), - &lp->tx_dma_regs->dmandptr); + writel(korina_tx_dma(lp, lp->tx_chain_head), + &lp->tx_dma_regs->dmandptr); /* Move head to tail */ lp->tx_chain_head = lp->tx_chain_tail; } else { @@ -256,18 +478,18 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) lp->td_ring[chain_prev].control &= ~DMA_DESC_COF; /* Link to prev */ - lp->td_ring[chain_prev].link = CPHYSADDR(td); + lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx); /* Move tail */ lp->tx_chain_tail = chain_next; /* Write to NDPTR */ - writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), - &(lp->tx_dma_regs->dmandptr)); + writel(korina_tx_dma(lp, lp->tx_chain_head), + &lp->tx_dma_regs->dmandptr); /* Move head to tail */ lp->tx_chain_head = lp->tx_chain_tail; - lp->tx_chain_status = desc_empty; + lp->tx_chain_status = desc_is_empty; } } else { - if (lp->tx_chain_status == desc_empty) { + if (lp->tx_chain_status == desc_is_empty) { /* Update tail */ td->control = DMA_COUNT(length) | DMA_DESC_COF | DMA_DESC_IOF; @@ -280,44 +502,66 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) DMA_DESC_COF | DMA_DESC_IOF; lp->td_ring[chain_prev].control &= ~DMA_DESC_COF; - lp->td_ring[chain_prev].link = CPHYSADDR(td); + lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx); lp->tx_chain_tail = chain_next; } } - dma_cache_wback((u32) td, sizeof(*td)); netif_trans_update(dev); spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; + +drop_packet: + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; } -static int mdio_read(struct net_device *dev, int mii_id, int reg) +static int korina_mdio_wait(struct korina_private *lp) +{ + u32 value; + + return readl_poll_timeout_atomic(&lp->eth_regs->miimind, + value, value & ETH_MII_IND_BSY, + 1, 1000); +} + +static int korina_mdio_read(struct net_device *dev, int phy, int reg) { struct korina_private *lp = netdev_priv(dev); int ret; - mii_id = ((lp->rx_irq == 0x2c ? 
1 : 0) << 8); + ret = korina_mdio_wait(lp); + if (ret < 0) + return ret; - writel(0, &lp->eth_regs->miimcfg); - writel(0, &lp->eth_regs->miimcmd); - writel(mii_id | reg, &lp->eth_regs->miimaddr); - writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd); + writel(phy << 8 | reg, &lp->eth_regs->miimaddr); + writel(1, &lp->eth_regs->miimcmd); + + ret = korina_mdio_wait(lp); + if (ret < 0) + return ret; - ret = (int)(readl(&lp->eth_regs->miimrdd)); + if (readl(&lp->eth_regs->miimind) & ETH_MII_IND_NV) + return -EINVAL; + + ret = readl(&lp->eth_regs->miimrdd); + writel(0, &lp->eth_regs->miimcmd); return ret; } -static void mdio_write(struct net_device *dev, int mii_id, int reg, int val) +static void korina_mdio_write(struct net_device *dev, int phy, int reg, int val) { struct korina_private *lp = netdev_priv(dev); - mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8); + if (korina_mdio_wait(lp)) + return; - writel(0, &lp->eth_regs->miimcfg); - writel(1, &lp->eth_regs->miimcmd); - writel(mii_id | reg, &lp->eth_regs->miimaddr); - writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd); + writel(0, &lp->eth_regs->miimcmd); + writel(phy << 8 | reg, &lp->eth_regs->miimaddr); writel(val, &lp->eth_regs->miimwtd); } @@ -353,12 +597,10 @@ static int korina_rx(struct net_device *dev, int limit) struct korina_private *lp = netdev_priv(dev); struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done]; struct sk_buff *skb, *skb_new; - u8 *pkt_buf; u32 devcs, pkt_len, dmas; + dma_addr_t ca; int count; - dma_cache_inv((u32)rd, sizeof(*rd)); - for (count = 0; count < limit; count++) { skb = lp->rx_skb[lp->rx_next_done]; skb_new = NULL; @@ -392,20 +634,22 @@ static int korina_rx(struct net_device *dev, int limit) goto next; } - pkt_len = RCVPKT_LENGTH(devcs); - - /* must be the (first and) last - * descriptor then */ - pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; - - /* invalidate the cache */ - dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); - /* Malloc up new buffer. 
*/ skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); - if (!skb_new) break; + + ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(lp->dmadev, ca)) { + dev_kfree_skb_any(skb_new); + break; + } + + pkt_len = RCVPKT_LENGTH(devcs); + dma_unmap_single(lp->dmadev, lp->rx_skb_dma[lp->rx_next_done], + pkt_len, DMA_FROM_DEVICE); + /* Do not count the CRC */ skb_put(skb, pkt_len - 4); skb->protocol = eth_type_trans(skb, dev); @@ -420,15 +664,13 @@ static int korina_rx(struct net_device *dev, int limit) dev->stats.multicast++; lp->rx_skb[lp->rx_next_done] = skb_new; + lp->rx_skb_dma[lp->rx_next_done] = ca; next: rd->devcs = 0; /* Restore descriptor's curr_addr */ - if (skb_new) - rd->ca = CPHYSADDR(skb_new->data); - else - rd->ca = CPHYSADDR(skb->data); + rd->ca = lp->rx_skb_dma[lp->rx_next_done]; rd->control = DMA_COUNT(KORINA_RBSIZE) | DMA_DESC_COD | DMA_DESC_IOD; @@ -437,23 +679,21 @@ next: ~DMA_DESC_COD; lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK; - dma_cache_wback((u32)rd, sizeof(*rd)); rd = &lp->rd_ring[lp->rx_next_done]; - writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas); + writel((u32)~DMA_STAT_DONE, &lp->rx_dma_regs->dmas); } dmas = readl(&lp->rx_dma_regs->dmas); if (dmas & DMA_STAT_HALT) { - writel(~(DMA_STAT_HALT | DMA_STAT_ERR), - &lp->rx_dma_regs->dmas); + writel((u32)~(DMA_STAT_HALT | DMA_STAT_ERR), + &lp->rx_dma_regs->dmas); lp->dma_halt_cnt++; rd->devcs = 0; - skb = lp->rx_skb[lp->rx_next_done]; - rd->ca = CPHYSADDR(skb->data); - dma_cache_wback((u32)rd, sizeof(*rd)); - korina_chain_rx(lp, rd); + rd->ca = lp->rx_skb_dma[lp->rx_next_done]; + writel(korina_rx_dma(lp, rd - lp->rd_ring), + &lp->rx_dma_regs->dmandptr); } return count; @@ -576,6 +816,10 @@ static void korina_tx(struct net_device *dev) /* We must always free the original skb */ if (lp->tx_skb[lp->tx_next_done]) { + dma_unmap_single(lp->dmadev, + lp->tx_skb_dma[lp->tx_next_done], + lp->tx_skb[lp->tx_next_done]->len, + DMA_TO_DEVICE); dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]); lp->tx_skb[lp->tx_next_done] = NULL; } @@ -622,9 +866,9 @@ korina_tx_dma_interrupt(int irq, void *dev_id) if (lp->tx_chain_status == desc_filled && (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) { - writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), - &(lp->tx_dma_regs->dmandptr)); - lp->tx_chain_status = desc_empty; + writel(korina_tx_dma(lp, lp->tx_chain_head), + &lp->tx_dma_regs->dmandptr); + lp->tx_chain_status = desc_is_empty; lp->tx_chain_head = lp->tx_chain_tail; netif_trans_update(dev); } @@ -643,7 +887,7 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media) { struct korina_private *lp = netdev_priv(dev); - mii_check_media(&lp->mii_if, 0, init_media); + mii_check_media(&lp->mii_if, 1, init_media); if (lp->mii_if.full_duplex) writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD, @@ -743,6 +987,7 @@ static int korina_alloc_ring(struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); struct sk_buff *skb; + dma_addr_t ca; int i; /* Initialize the transmit descriptors */ @@ -754,7 +999,7 @@ static int korina_alloc_ring(struct net_device *dev) } lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail = lp->tx_full = lp->tx_count = 0; - lp->tx_chain_status = desc_empty; + lp->tx_chain_status = desc_is_empty; /* Initialize the receive descriptors */ for (i = 0; i < KORINA_NUM_RDS; i++) { @@ -765,19 +1010,24 @@ static int korina_alloc_ring(struct net_device *dev) lp->rd_ring[i].control = DMA_DESC_IOD | DMA_COUNT(KORINA_RBSIZE); 
lp->rd_ring[i].devcs = 0; - lp->rd_ring[i].ca = CPHYSADDR(skb->data); - lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]); + ca = dma_map_single(lp->dmadev, skb->data, KORINA_RBSIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(lp->dmadev, ca)) + return -ENOMEM; + lp->rd_ring[i].ca = ca; + lp->rx_skb_dma[i] = ca; + lp->rd_ring[i].link = korina_rx_dma(lp, i + 1); } /* loop back receive descriptors, so the last * descriptor points to the first one */ - lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]); + lp->rd_ring[i - 1].link = lp->rd_dma; lp->rd_ring[i - 1].control |= DMA_DESC_COD; lp->rx_next_done = 0; lp->rx_chain_head = 0; lp->rx_chain_tail = 0; - lp->rx_chain_status = desc_empty; + lp->rx_chain_status = desc_is_empty; return 0; } @@ -789,16 +1039,22 @@ static void korina_free_ring(struct net_device *dev) for (i = 0; i < KORINA_NUM_RDS; i++) { lp->rd_ring[i].control = 0; - if (lp->rx_skb[i]) + if (lp->rx_skb[i]) { + dma_unmap_single(lp->dmadev, lp->rx_skb_dma[i], + KORINA_RBSIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(lp->rx_skb[i]); - lp->rx_skb[i] = NULL; + lp->rx_skb[i] = NULL; + } } for (i = 0; i < KORINA_NUM_TDS; i++) { lp->td_ring[i].control = 0; - if (lp->tx_skb[i]) + if (lp->tx_skb[i]) { + dma_unmap_single(lp->dmadev, lp->tx_skb_dma[i], + lp->tx_skb[i]->len, DMA_TO_DEVICE); dev_kfree_skb_any(lp->tx_skb[i]); - lp->tx_skb[i] = NULL; + lp->tx_skb[i] = NULL; + } } } @@ -830,7 +1086,8 @@ static int korina_init(struct net_device *dev) writel(0, &lp->rx_dma_regs->dmas); /* Start Rx DMA */ - korina_start_rx(lp, &lp->rd_ring[0]); + writel(0, &lp->rx_dma_regs->dmandptr); + writel(korina_rx_dma(lp, 0), &lp->rx_dma_regs->dmadptr); writel(readl(&lp->tx_dma_regs->dmasm) & ~(DMA_STAT_FINI | DMA_STAT_ERR), @@ -867,14 +1124,17 @@ static int korina_init(struct net_device *dev) /* Management Clock Prescaler Divisor * Clock independent setting */ - writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1, - &lp->eth_regs->ethmcp); + writel(((lp->mii_clock_freq) / MII_CLOCK + 1) & ~1, + &lp->eth_regs->ethmcp); + writel(0, &lp->eth_regs->miimcfg); /* don't transmit until fifo contains 48b */ writel(48, &lp->eth_regs->ethfifott); writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1); + korina_check_media(dev, 1); + napi_enable(&lp->napi); netif_start_queue(dev); @@ -1022,86 +1282,94 @@ static const struct net_device_ops korina_netdev_ops = { static int korina_probe(struct platform_device *pdev) { - struct korina_device *bif = platform_get_drvdata(pdev); + u8 *mac_addr = dev_get_platdata(&pdev->dev); struct korina_private *lp; struct net_device *dev; - struct resource *r; + struct clk *clk; + void __iomem *p; int rc; - dev = alloc_etherdev(sizeof(struct korina_private)); + dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct korina_private)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); lp = netdev_priv(dev); - bif->dev = dev; - memcpy(dev->dev_addr, bif->mac, ETH_ALEN); + if (mac_addr) + ether_addr_copy(dev->dev_addr, mac_addr); + else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0) + eth_hw_addr_random(dev); + + clk = devm_clk_get_optional(&pdev->dev, "mdioclk"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + if (clk) { + clk_prepare_enable(clk); + lp->mii_clock_freq = clk_get_rate(clk); + } else { + lp->mii_clock_freq = 200000000; /* max possible input clk */ + } - lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx"); - lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx"); + lp->rx_irq = platform_get_irq_byname(pdev, "rx"); + lp->tx_irq = platform_get_irq_byname(pdev, "tx"); - r = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); - dev->base_addr = r->start; - lp->eth_regs = ioremap(r->start, resource_size(r)); - if (!lp->eth_regs) { + p = devm_platform_ioremap_resource_byname(pdev, "emac"); + if (!p) { printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); - rc = -ENXIO; - goto probe_err_out; + return -ENOMEM; } + lp->eth_regs = p; - r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); - lp->rx_dma_regs = ioremap(r->start, resource_size(r)); - if (!lp->rx_dma_regs) { + p = devm_platform_ioremap_resource_byname(pdev, "dma_rx"); + if (!p) { printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); - rc = -ENXIO; - goto probe_err_dma_rx; + return -ENOMEM; } + lp->rx_dma_regs = p; - r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); - lp->tx_dma_regs = ioremap(r->start, resource_size(r)); - if (!lp->tx_dma_regs) { + p = devm_platform_ioremap_resource_byname(pdev, "dma_tx"); + if (!p) { printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n"); - rc = -ENXIO; - goto probe_err_dma_tx; - } - - lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL); - if (!lp->td_ring) { - rc = -ENXIO; - goto probe_err_td_ring; + return -ENOMEM; } + lp->tx_dma_regs = p; - dma_cache_inv((unsigned long)(lp->td_ring), - TD_RING_SIZE + RD_RING_SIZE); + lp->td_ring = dmam_alloc_coherent(&pdev->dev, TD_RING_SIZE, + &lp->td_dma, GFP_KERNEL); + if (!lp->td_ring) + return -ENOMEM; - /* now convert TD_RING pointer to KSEG1 */ - lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring); - lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS]; + lp->rd_ring = dmam_alloc_coherent(&pdev->dev, RD_RING_SIZE, + &lp->rd_dma, GFP_KERNEL); + if (!lp->rd_ring) + return -ENOMEM; spin_lock_init(&lp->lock); /* just use the rx dma irq */ dev->irq = lp->rx_irq; lp->dev = dev; + lp->dmadev = &pdev->dev; dev->netdev_ops = &korina_netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT); - lp->phy_addr = (((lp->rx_irq == 0x2c? 
1:0) << 8) | 0x05); lp->mii_if.dev = dev; - lp->mii_if.mdio_read = mdio_read; - lp->mii_if.mdio_write = mdio_write; - lp->mii_if.phy_id = lp->phy_addr; + lp->mii_if.mdio_read = korina_mdio_read; + lp->mii_if.mdio_write = korina_mdio_write; + lp->mii_if.phy_id = 1; lp->mii_if.phy_id_mask = 0x1f; lp->mii_if.reg_num_mask = 0x1f; + platform_set_drvdata(pdev, dev); + rc = register_netdev(dev); if (rc < 0) { printk(KERN_ERR DRV_NAME ": cannot register net device: %d\n", rc); - goto probe_err_register; + return rc; } timer_setup(&lp->media_check_timer, korina_poll_media, 0); @@ -1109,40 +1377,33 @@ static int korina_probe(struct platform_device *pdev) printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n", dev->name); -out: return rc; - -probe_err_register: - kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); -probe_err_td_ring: - iounmap(lp->tx_dma_regs); -probe_err_dma_tx: - iounmap(lp->rx_dma_regs); -probe_err_dma_rx: - iounmap(lp->eth_regs); -probe_err_out: - free_netdev(dev); - goto out; } static int korina_remove(struct platform_device *pdev) { - struct korina_device *bif = platform_get_drvdata(pdev); - struct korina_private *lp = netdev_priv(bif->dev); - - iounmap(lp->eth_regs); - iounmap(lp->rx_dma_regs); - iounmap(lp->tx_dma_regs); - kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); + struct net_device *dev = platform_get_drvdata(pdev); - unregister_netdev(bif->dev); - free_netdev(bif->dev); + unregister_netdev(dev); return 0; } +#ifdef CONFIG_OF +static const struct of_device_id korina_match[] = { + { + .compatible = "idt,3243x-emac", + }, + { } +}; +MODULE_DEVICE_TABLE(of, korina_match); +#endif + static struct platform_driver korina_driver = { - .driver.name = "korina", + .driver = { + .name = "korina", + .of_match_table = of_match_ptr(korina_match), + }, .probe = korina_probe, .remove = korina_remove, }; diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 51ed8a54d380..41c2ad210bc9 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -435,7 +435,6 @@ static int xrx200_probe(struct platform_device *pdev) struct resource *res; struct xrx200_priv *priv; struct net_device *net_dev; - const u8 *mac; int err; /* alloc the network device */ @@ -460,10 +459,8 @@ static int xrx200_probe(struct platform_device *pdev) } priv->pmac_reg = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->pmac_reg)) { - dev_err(dev, "failed to request and remap io ranges\n"); + if (IS_ERR(priv->pmac_reg)) return PTR_ERR(priv->pmac_reg); - } priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx"); if (priv->chan_rx.dma.irq < 0) @@ -479,10 +476,8 @@ static int xrx200_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - mac = of_get_mac_address(np); - if (!IS_ERR(mac)) - ether_addr_copy(net_dev->dev_addr, mac); - else + err = of_get_mac_address(np, net_dev->dev_addr); + if (err) eth_hw_addr_random(net_dev); /* bring up the dma engine and IP core */ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 3bfb659b5c99..d207bfcaf31d 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -700,7 +700,8 @@ static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, ip_hdr(skb)->ihl << TX_IHL_SHIFT; /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL - * it seems we don't need to pass the initial checksum. */ + * it seems we don't need to pass the initial checksum. 
+ */ switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: cmd |= UDP_FRAME; @@ -790,7 +791,8 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, WARN(1, "failed to prepare checksum!"); /* Should we set this? Can't use the value from skb_tx_csum() - * as it's not the correct initial L4 checksum to use. */ + * as it's not the correct initial L4 checksum to use. + */ desc->l4i_chk = 0; desc->byte_cnt = hdr_len; @@ -2700,7 +2702,6 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct platform_device *ppdev; struct mv643xx_eth_platform_data ppd; struct resource res; - const char *mac_addr; int ret; int dev_num = 0; @@ -2731,9 +2732,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, return -EINVAL; } - mac_addr = of_get_mac_address(pnp); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ppd.mac_addr, mac_addr); + of_get_mac_address(pnp, ppd.mac_addr); mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a635cf84608a..7d5cd9bc6c99 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1087,7 +1087,7 @@ static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, return 0; } -static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) +static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) { u32 wsize; u8 target, attr; @@ -2137,7 +2137,7 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame, { struct mvneta_port *pp = netdev_priv(dev); struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); - int i, nxmit_byte = 0, nxmit = num_frame; + int i, nxmit_byte = 0, nxmit = 0; int cpu = smp_processor_id(); struct mvneta_tx_queue *txq; struct netdev_queue *nq; @@ -2155,12 +2155,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame, __netif_tx_lock(nq, cpu); for (i = 0; i < num_frame; i++) { ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); - if (ret == MVNETA_XDP_TX) { - nxmit_byte += frames[i]->len; - } else { - xdp_return_frame_rx_napi(frames[i]); - nxmit--; - } + if (ret != MVNETA_XDP_TX) + break; + + nxmit_byte += frames[i]->len; + nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) @@ -3994,7 +3993,8 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, /* Armada 370 documentation says we can only change the port mode * and in-band enable when the link is down, so force it down - * while making these changes. We also do this for GMAC_CTRL2 */ + * while making these changes. We also do this for GMAC_CTRL2 + */ if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { @@ -4176,9 +4176,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); if (cpu == elected_cpu) - /* Map the default receive queue queue to the - * elected CPU - */ + /* Map the default receive queue to the elected CPU */ rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); /* We update the TX queue map only if we have one @@ -4908,7 +4906,8 @@ static int mvneta_ethtool_set_eee(struct net_device *dev, u32 lpi_ctl0; /* The Armada 37x documents do not give limits for this other than - * it being an 8-bit register. */ + * it being an 8-bit register. 
+ */ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) return -EINVAL; @@ -5142,7 +5141,6 @@ static int mvneta_probe(struct platform_device *pdev) struct net_device *dev; struct phylink *phylink; struct phy *comphy; - const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; phy_interface_t phy_mode; const char *mac_from; @@ -5238,10 +5236,9 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_ports; } - dt_mac_addr = of_get_mac_address(dn); - if (!IS_ERR(dt_mac_addr)) { + err = of_get_mac_address(dn, dev->dev_addr); + if (!err) { mac_from = "device tree"; - ether_addr_copy(dev->dev_addr, dt_mac_addr); } else { mvneta_get_mac_addr(pp, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 1767c60056c5..ec706d614cac 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3744,7 +3744,7 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame, struct xdp_frame **frames, u32 flags) { struct mvpp2_port *port = netdev_priv(dev); - int i, nxmit_byte = 0, nxmit = num_frame; + int i, nxmit_byte = 0, nxmit = 0; struct mvpp2_pcpu_stats *stats; u16 txq_id; u32 ret; @@ -3762,12 +3762,11 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame, for (i = 0; i < num_frame; i++) { ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); - if (ret == MVPP2_XDP_TX) { - nxmit_byte += frames[i]->len; - } else { - xdp_return_frame_rx_napi(frames[i]); - nxmit--; - } + if (ret != MVPP2_XDP_TX) + break; + + nxmit_byte += frames[i]->len; + nxmit++; } if (likely(nxmit > 0)) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 4812cdb4609e..7cc7d72d761e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -918,9 +918,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); @@ -1335,7 +1334,7 @@ static void mvpp2_prs_vid_init(struct mvpp2 *priv) static int mvpp2_prs_etype_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; - int tid; + int tid, ihl; /* Ethertype: PPPoE */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, @@ -1427,67 +1426,43 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) MVPP2_PRS_RI_UDF3_MASK); mvpp2_prs_hw_write(priv, &pe); - /* Ethertype: IPv4 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* goto ipv4 dest-address (skip eth_type + IP-header-size - 
4) */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; + /* Ethertype: IPv4 with header length >= 5 */ + for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) { + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD, - MVPP2_PRS_IPV4_HEAD_MASK); + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; - /* Clear ri before updating */ - pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | ihl, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + MVPP2_ETH_TYPE_LEN + (ihl * 4), + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + } /* Ethertype: IPv6 without options */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, @@ -1674,7 +1649,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) pe.index = tid; mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD | + MVPP2_PRS_IPV4_IHL_MIN, MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK); @@ -1788,9 +1764,8 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv) mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4, 
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index c16e5b9947bd..5ce5907be591 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -28,7 +28,8 @@ #define MVPP2_PRS_IPV4_MC 0xe0 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 #define MVPP2_PRS_IPV4_BC_MASK 0xff -#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MIN 0x5 +#define MVPP2_PRS_IPV4_IHL_MAX 0xf #define MVPP2_PRS_IPV4_IHL_MASK 0xf #define MVPP2_PRS_IPV6_MC 0xff #define MVPP2_PRS_IPV6_MC_MASK 0xff diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 68deae529bc9..fac6474ad694 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -30,10 +30,35 @@ static LIST_HEAD(cgx_list); /* Convert firmware speed encoding to user format(Mbps) */ -static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX]; +static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = { + [CGX_LINK_NONE] = 0, + [CGX_LINK_10M] = 10, + [CGX_LINK_100M] = 100, + [CGX_LINK_1G] = 1000, + [CGX_LINK_2HG] = 2500, + [CGX_LINK_5G] = 5000, + [CGX_LINK_10G] = 10000, + [CGX_LINK_20G] = 20000, + [CGX_LINK_25G] = 25000, + [CGX_LINK_40G] = 40000, + [CGX_LINK_50G] = 50000, + [CGX_LINK_80G] = 80000, + [CGX_LINK_100G] = 100000, +}; /* Convert firmware lmac type encoding to string */ -static char *cgx_lmactype_string[LMAC_MODE_MAX]; +static const char *cgx_lmactype_string[LMAC_MODE_MAX] = { + [LMAC_MODE_SGMII] = "SGMII", + [LMAC_MODE_XAUI] = "XAUI", + [LMAC_MODE_RXAUI] = "RXAUI", + [LMAC_MODE_10G_R] = "10G_R", + [LMAC_MODE_40G_R] = "40G_R", + [LMAC_MODE_QSGMII] = "QSGMII", + [LMAC_MODE_25G_R] = "25G_R", + [LMAC_MODE_50G_R] = "50G_R", + [LMAC_MODE_100G_R] = "100G_R", + [LMAC_MODE_USXGMII] = "USXGMII", +}; /* CGX PHY management internal APIs */ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); @@ -659,34 +684,6 @@ int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id) return err; } -static inline void cgx_link_usertable_init(void) -{ - cgx_speed_mbps[CGX_LINK_NONE] = 0; - cgx_speed_mbps[CGX_LINK_10M] = 10; - cgx_speed_mbps[CGX_LINK_100M] = 100; - cgx_speed_mbps[CGX_LINK_1G] = 1000; - cgx_speed_mbps[CGX_LINK_2HG] = 2500; - cgx_speed_mbps[CGX_LINK_5G] = 5000; - cgx_speed_mbps[CGX_LINK_10G] = 10000; - cgx_speed_mbps[CGX_LINK_20G] = 20000; - cgx_speed_mbps[CGX_LINK_25G] = 25000; - cgx_speed_mbps[CGX_LINK_40G] = 40000; - cgx_speed_mbps[CGX_LINK_50G] = 50000; - cgx_speed_mbps[CGX_LINK_80G] = 80000; - cgx_speed_mbps[CGX_LINK_100G] = 100000; - - cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII"; - cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI"; - cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI"; - cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R"; - cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R"; - cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII"; - cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R"; - cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R"; - cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R"; - cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII"; -} - static int cgx_link_usertable_index_map(int speed) { switch (speed) { @@ -828,7 +825,7 @@ static inline void link_status_user_format(u64 lstat, struct cgx_link_user_info *linfo, struct cgx *cgx, u8 lmac_id) { - char *lmac_string; + const char 
*lmac_string; linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); @@ -1377,7 +1374,6 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) list_add(&cgx->cgx_list, &cgx_list); - cgx_link_usertable_init(); cgx_populate_features(cgx); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index ea456099b33c..cedb2616c509 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -74,13 +74,13 @@ struct otx2_mbox { struct otx2_mbox_dev *dev; }; -/* Header which preceeds all mbox messages */ +/* Header which precedes all mbox messages */ struct mbox_hdr { u64 msg_size; /* Total msgs size embedded */ u16 num_msgs; /* No of msgs embedded */ }; -/* Header which preceeds every msg and is also part of it */ +/* Header which precedes every msg and is also part of it */ struct mbox_msghdr { u16 pcifunc; /* Who's sending this msg */ u16 id; /* Mbox message ID */ @@ -177,6 +177,9 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \ M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ cpt_rd_wr_reg_msg) \ +M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ +M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ + msg_rsp) \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ npc_mcam_alloc_entry_rsp) \ @@ -216,6 +219,9 @@ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_rsp) \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ msg_req, npc_mcam_read_base_rule_rsp) \ +M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ + npc_mcam_get_stats_req, \ + npc_mcam_get_stats_rsp) \ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ nix_lf_alloc_req, nix_lf_alloc_rsp) \ @@ -277,8 +283,8 @@ struct msg_req { struct mbox_msghdr hdr; }; -/* Generic rsponse msg used a ack or response for those mbox - * messages which doesn't have a specific rsp msg format. +/* Generic response msg used an ack or response for those mbox + * messages which don't have a specific rsp msg format. */ struct msg_rsp { struct mbox_msghdr hdr; @@ -299,7 +305,7 @@ struct ready_msg_rsp { /* Structure for requesting resource provisioning. * 'modify' flag to be used when either requesting more - * or to detach partial of a cetain resource type. + * or to detach partial of a certain resource type. * Rest of the fields specify how many of what type to * be attached. * To request LFs from two blocks of same type this mailbox @@ -489,7 +495,7 @@ struct cgx_set_link_mode_rsp { }; #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ -#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precison time protocol */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) #define RVU_MAC_CGX BIT_ULL(3) #define RVU_MAC_RPM BIT_ULL(4) @@ -605,6 +611,7 @@ enum nix_af_status { NIX_AF_INVAL_SSO_PF_FUNC = -420, NIX_AF_ERR_TX_VTAG_NOSPC = -421, NIX_AF_ERR_RX_VTAG_INUSE = -422, + NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423, }; /* For NIX RX vtag action */ @@ -1141,6 +1148,7 @@ struct npc_install_flow_req { u64 features; u16 entry; u16 channel; + u16 chan_mask; u8 intf; u8 set_cntr; /* If counter is available set counter for this entry ? 
*/ u8 default_rule; @@ -1193,6 +1201,17 @@ struct npc_mcam_read_base_rule_rsp { struct mcam_entry entry; }; +struct npc_mcam_get_stats_req { + struct mbox_msghdr hdr; + u16 entry; /* mcam entry */ +}; + +struct npc_mcam_get_stats_rsp { + struct mbox_msghdr hdr; + u64 stat; /* counter stats */ + u8 stat_ena; /* enabled */ +}; + enum ptp_op { PTP_OP_ADJFINE = 0, PTP_OP_GET_CLOCK = 1, @@ -1239,4 +1258,62 @@ struct cpt_lf_alloc_req_msg { int blkaddr; }; +/* Mailbox message request and response format for CPT stats. */ +struct cpt_sts_req { + struct mbox_msghdr hdr; + u8 blkaddr; +}; + +struct cpt_sts_rsp { + struct mbox_msghdr hdr; + u64 inst_req_pc; + u64 inst_lat_pc; + u64 rd_req_pc; + u64 rd_lat_pc; + u64 rd_uc_pc; + u64 active_cycles_pc; + u64 ctx_mis_pc; + u64 ctx_hit_pc; + u64 ctx_aop_pc; + u64 ctx_aop_lat_pc; + u64 ctx_ifetch_pc; + u64 ctx_ifetch_lat_pc; + u64 ctx_ffetch_pc; + u64 ctx_ffetch_lat_pc; + u64 ctx_wback_pc; + u64 ctx_wback_lat_pc; + u64 ctx_psh_pc; + u64 ctx_psh_lat_pc; + u64 ctx_err; + u64 ctx_enc_id; + u64 ctx_flush_timer; + u64 rxc_time; + u64 rxc_time_cfg; + u64 rxc_active_sts; + u64 rxc_zombie_sts; + u64 busy_sts_ae; + u64 free_sts_ae; + u64 busy_sts_se; + u64 free_sts_se; + u64 busy_sts_ie; + u64 free_sts_ie; + u64 exe_err_info; + u64 cptclk_cnt; + u64 diag; + u64 rxc_dfrg; + u64 x2p_link_cfg0; + u64 x2p_link_cfg1; +}; + +/* Mailbox message request format to configure reassembly timeout. */ +struct cpt_rxc_time_cfg_req { + struct mbox_msghdr hdr; + int blkaddr; + u32 step; + u16 zombie_thres; + u16 zombie_limit; + u16 active_thres; + u16 active_limit; +}; + #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 3c640f6aba92..1e012e787260 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -167,6 +167,8 @@ enum key_fields { NPC_IPPROTO_SCTP, NPC_IPPROTO_AH, NPC_IPPROTO_ESP, + NPC_IPPROTO_ICMP, + NPC_IPPROTO_ICMP6, NPC_SPORT_TCP, NPC_DPORT_TCP, NPC_SPORT_UDP, @@ -420,6 +422,11 @@ struct nix_tx_action { #define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40) #define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32) +/* NPC MCAM reserved entry index per nixlf */ +#define NIXLF_UCAST_ENTRY 0 +#define NIXLF_BCAST_ENTRY 1 +#define NIXLF_PROMISC_ENTRY 2 + struct npc_mcam_kex { /* MKEX Profle Header */ u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 76f399229ddb..c2cc4806d13c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -548,6 +548,12 @@ static inline int is_afvf(u16 pcifunc) return !(pcifunc & ~RVU_PFVF_FUNC_MASK); } +/* check if PF_FUNC is AF */ +static inline bool is_pffunc_af(u16 pcifunc) +{ + return !pcifunc; +} + static inline bool is_rvu_fwdata_valid(struct rvu *rvu) { return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) && @@ -640,7 +646,8 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en); void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr); void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, u64 chan, bool allmulti); + int nixlf, u64 chan, u8 chan_cnt, + bool allmulti); void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void 
rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, @@ -665,9 +672,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena); int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel); int npc_flow_steering_init(struct rvu *rvu, int blkaddr); const char *npc_get_field_name(u8 hdr); -bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf, - u16 pcifunc, u8 intf, struct mcam_entry *entry, - int *entry_index); int npc_get_bank(struct npc_mcam *mcam, int index); void npc_mcam_enable_flows(struct rvu *rvu, u16 target); void npc_mcam_disable_flows(struct rvu *rvu, u16 target); @@ -680,6 +684,11 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); +int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, + int type); +bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, + int index); + /* CPT APIs */ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 0945c3a3b180..89253f7bdadb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2020 Marvell. */ +#include <linux/bitfield.h> #include <linux/pci.h> #include "rvu_struct.h" #include "rvu_reg.h" @@ -9,6 +10,28 @@ /* CPT PF device id */ #define PCI_DEVID_OTX2_CPT_PF 0xA0FD +#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2 + +/* Length of initial context fetch in 128 byte words */ +#define CPT_CTX_ILEN 2 + +#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \ +({ \ + u64 free_sts = 0, busy_sts = 0; \ + typeof(rsp) _rsp = rsp; \ + u32 e, i; \ + \ + for (e = (e_min), i = 0; e < (e_max); e++, i++) { \ + reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \ + if (reg & 0x1) \ + busy_sts |= 1ULL << i; \ + \ + if (reg & 0x2) \ + free_sts |= 1ULL << i; \ + } \ + (_rsp)->busy_sts_##etype = busy_sts; \ + (_rsp)->free_sts_##etype = free_sts; \ +}) static int get_cpt_pf_num(struct rvu *rvu) { @@ -21,7 +44,8 @@ static int get_cpt_pf_num(struct rvu *rvu) if (!pdev) continue; - if (pdev->device == PCI_DEVID_OTX2_CPT_PF) { + if (pdev->device == PCI_DEVID_OTX2_CPT_PF || + pdev->device == PCI_DEVID_OTX2_CPT10K_PF) { cpt_pf_num = i; put_device(&pdev->dev); break; @@ -55,6 +79,17 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) return true; } +static int validate_and_get_cpt_blkaddr(int req_blkaddr) +{ + int blkaddr; + + blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0; + if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) + return -EINVAL; + + return blkaddr; +} + int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, struct cpt_lf_alloc_req_msg *req, struct msg_rsp *rsp) @@ -65,9 +100,9 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, int num_lfs, slot; u64 val; - blkaddr = req->blkaddr ? 
req->blkaddr : BLKADDR_CPT0; - if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) - return -ENODEV; + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; if (req->eng_grpmsk == 0x0) return CPT_AF_ERR_GRP_INVALID; @@ -103,6 +138,9 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, /* Set CPT LF group and priority */ val = (u64)req->eng_grpmsk << 48 | 1; + if (!is_rvu_otx2(rvu)) + val |= (CPT_CTX_ILEN << 17); + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */ @@ -162,7 +200,9 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) struct rvu_block *block; struct rvu_pfvf *pfvf; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; /* Registers that can be accessed from PF/VF */ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) || @@ -192,6 +232,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) case CPT_AF_PF_FUNC: case CPT_AF_BLK_RST: case CPT_AF_CONSTANTS1: + case CPT_AF_CTX_FLUSH_TIMER: return true; } @@ -217,9 +258,9 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, { int blkaddr; - blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0; - if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) - return -ENODEV; + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; /* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) && @@ -241,6 +282,141 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, return 0; } +static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) +{ + if (is_rvu_otx2(rvu)) + return; + + rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC); + rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC); + rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC); + rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr, + CPT_AF_CTX_AOP_LATENCY_PC); + rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC); + rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr, + CPT_AF_CTX_IFETCH_LATENCY_PC); + rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC); + rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr, + CPT_AF_CTX_FFETCH_LATENCY_PC); + rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC); + rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr, + CPT_AF_CTX_FFETCH_LATENCY_PC); + rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC); + rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr, + CPT_AF_CTX_FFETCH_LATENCY_PC); + rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR); + rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID); + rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER); + + rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME); + rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG); + rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); + rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS); + rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG); + rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0)); + rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1)); +} + +static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) +{ + u16 max_ses, max_ies, max_aes; + u32 e_min = 0, e_max = 0; + u64 reg; + + reg = rvu_read64(rvu, blkaddr, 
CPT_AF_CONSTANTS1); + max_ses = reg & 0xffff; + max_ies = (reg >> 16) & 0xffff; + max_aes = (reg >> 32) & 0xffff; + + /* Get AE status */ + e_min = max_ses + max_ies; + e_max = max_ses + max_ies + max_aes; + cpt_get_eng_sts(e_min, e_max, rsp, ae); + /* Get SE status */ + e_min = 0; + e_max = max_ses; + cpt_get_eng_sts(e_min, e_max, rsp, se); + /* Get IE status */ + e_min = max_ses; + e_max = max_ses + max_ies; + cpt_get_eng_sts(e_min, e_max, rsp, ie); +} + +int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req, + struct cpt_sts_rsp *rsp) +{ + int blkaddr; + + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; + + /* This message is accepted only if sent from CPT PF/VF */ + if (!is_cpt_pf(rvu, req->hdr.pcifunc) && + !is_cpt_vf(rvu, req->hdr.pcifunc)) + return CPT_AF_ERR_ACCESS_DENIED; + + get_ctx_pc(rvu, rsp, blkaddr); + + /* Get CPT engines status */ + get_eng_sts(rvu, rsp, blkaddr); + + /* Read CPT instruction PC registers */ + rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); + rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); + rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC); + rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC); + rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC); + rsp->active_cycles_pc = rvu_read64(rvu, blkaddr, + CPT_AF_ACTIVE_CYCLES_PC); + rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO); + rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT); + rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG); + + return 0; +} + +#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48) +#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32) +#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16) +#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0) +#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48) +#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48) + +static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req, + int blkaddr) +{ + u64 dfrg_reg; + + dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres); + dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit); + dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres); + dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit); + + rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step); + rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg); +} + +int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu, + struct cpt_rxc_time_cfg_req *req, + struct msg_rsp *rsp) +{ + int blkaddr; + + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; + + /* This message is accepted only if sent from CPT PF/VF */ + if (!is_cpt_pf(rvu, req->hdr.pcifunc) && + !is_cpt_vf(rvu, req->hdr.pcifunc)) + return CPT_AF_ERR_ACCESS_DENIED; + + cpt_rxc_time_cfg(rvu, req, blkaddr); + + return 0; +} + #define INPROG_INFLIGHT(reg) ((reg) & 0x1FF) #define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31)) #define INPROG_GRB(reg) (((reg) >> 32) & 0xFF) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index de3968d2e5ce..9bf8eaabf9ab 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -2017,7 +2017,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); break; case NPC_OUTER_VID: - seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci)); + seq_printf(s, "0x%x ", 
ntohs(rule->packet.vlan_tci)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.vlan_tci)); break; @@ -2160,7 +2160,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) seq_printf(s, "\tmcam entry: %d\n", iter->entry); rvu_dbg_npc_mcam_show_flows(s, iter); - if (iter->intf == NIX_INTF_RX) { + if (is_npc_intf_rx(iter->intf)) { target = iter->rx_action.pf_func; pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; seq_printf(s, "\tForward to: PF%d ", pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 3d068b7d46bd..0a8bd667cb11 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -273,7 +273,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, - pfvf->rx_chan_base, false); + pfvf->rx_chan_base, + pfvf->rx_chan_cnt, false); break; } @@ -3088,7 +3089,8 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); else rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, - pfvf->rx_chan_base, allmulti); + pfvf->rx_chan_base, + pfvf->rx_chan_cnt, allmulti); return 0; } @@ -3635,9 +3637,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, if (err) return err; - rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); - - npc_mcam_disable_flows(rvu, pcifunc); + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); return rvu_cgx_start_stop_io(rvu, pcifunc, false); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 0bd49c7080a6..0bc4529691ec 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -22,10 +22,6 @@ #define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */ #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ -#define NIXLF_UCAST_ENTRY 0 -#define NIXLF_BCAST_ENTRY 1 -#define NIXLF_PROMISC_ENTRY 2 - #define NPC_PARSE_RESULT_DMAC_OFFSET 8 #define NPC_HW_TSTAMP_OFFSET 8 #define NPC_KEX_CHAN_MASK 0xFFFULL @@ -96,6 +92,10 @@ int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) if (is_npc_intf_tx(intf)) return 0; + /* return in case of AF installed rules */ + if (is_pffunc_af(pcifunc)) + return 0; + if (is_afvf(pcifunc)) { end = rvu_get_num_lbk_chans(); if (end < 0) @@ -196,8 +196,8 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF; } -static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, - u16 pcifunc, int nixlf, int type) +int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, + u16 pcifunc, int nixlf, int type) { int pf = rvu_get_pf(pcifunc); int index; @@ -230,8 +230,8 @@ int npc_get_bank(struct npc_mcam *mcam, int index) return bank; } -static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, - int blkaddr, int index) +bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) { int bank = npc_get_bank(mcam, index); u64 cfg; @@ -647,13 +647,17 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, } void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, u64 chan, bool allmulti) + int nixlf, u64 chan, u8 chan_cnt, + bool allmulti) { struct rvu_pfvf 
*pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, ucast_idx, index, kwi; - struct mcam_entry entry = { {0} }; - struct nix_rx_action action = { }; + int blkaddr, ucast_idx, index; + u8 mac_addr[ETH_ALEN] = { 0 }; + struct nix_rx_action action; + u64 relaxed_mask; /* Only PF or AF VF can add a promiscuous entry */ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc)) @@ -663,24 +667,15 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, if (blkaddr < 0) return; + *(u64 *)&action = 0x00; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); - entry.kw[0] = chan; - entry.kw_mask[0] = 0xFFFULL; - - if (allmulti) { - kwi = NPC_KEXOF_DMAC / sizeof(u64); - entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */ - entry.kw_mask[kwi] = BIT_ULL(40); - } - - ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - /* If the corresponding PF's ucast action is RSS, * use the same action for promisc also */ + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); @@ -691,9 +686,36 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.pf_func = pcifunc; } - entry.action = *(u64 *)&action; - npc_config_mcam_entry(rvu, mcam, blkaddr, index, - pfvf->nix_rx_intf, &entry, true); + if (allmulti) { + mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ + ether_addr_copy(req.packet.dmac, mac_addr); + ether_addr_copy(req.mask.dmac, mac_addr); + req.features = BIT_ULL(NPC_DMAC); + } + + req.chan_mask = 0xFFFU; + if (chan_cnt > 1) { + if (!is_power_of_2(chan_cnt)) { + dev_err(rvu->dev, + "%s: channel count more than 1, must be power of 2\n", __func__); + return; + } + relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1, + ilog2(chan_cnt)); + req.chan_mask &= relaxed_mask; + } + + req.channel = chan; + req.intf = pfvf->nix_rx_intf; + req.entry = index; + req.op = action.op; + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.index = action.index; + req.match_id = action.match_id; + req.flow_key_alg = action.flow_key_alg; + + rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, @@ -728,12 +750,14 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { + struct rvu_pfvf *pfvf; + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - struct mcam_entry entry = { {0} }; struct rvu_hwinfo *hw = rvu->hw; - struct nix_rx_action action; - struct rvu_pfvf *pfvf; int blkaddr, index; + u32 req_index = 0; + u8 op; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -755,32 +779,29 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); - /* Match ingress channel */ - entry.kw[0] = chan; - entry.kw_mask[0] = 0xfffull; - - /* Match broadcast MAC address. 
- * DMAC is extracted at 0th bit of PARSE_KEX::KW1 - */ - entry.kw[1] = 0xffffffffffffull; - entry.kw_mask[1] = 0xffffffffffffull; - - *(u64 *)&action = 0x00; if (!hw->cap.nix_rx_multicast) { /* Early silicon doesn't support pkt replication, * so install entry with UCAST action, so that PF * receives all broadcast packets. */ - action.op = NIX_RX_ACTIONOP_UCAST; - action.pf_func = pcifunc; + op = NIX_RX_ACTIONOP_UCAST; } else { - action.index = pfvf->bcast_mce_idx; - action.op = NIX_RX_ACTIONOP_MCAST; + op = NIX_RX_ACTIONOP_MCAST; + req_index = pfvf->bcast_mce_idx; } - entry.action = *(u64 *)&action; - npc_config_mcam_entry(rvu, mcam, blkaddr, index, - pfvf->nix_rx_intf, &entry, true); + eth_broadcast_addr((u8 *)&req.packet.dmac); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.features = BIT_ULL(NPC_DMAC); + req.channel = chan; + req.intf = pfvf->nix_rx_intf; + req.entry = index; + req.op = op; + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.index = req_index; + + rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) @@ -967,7 +988,7 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; - struct rvu_npc_mcam_rule *rule; + struct rvu_npc_mcam_rule *rule, *tmp; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -977,15 +998,18 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) mutex_lock(&mcam->lock); /* Disable MCAM entries directing traffic to this 'pcifunc' */ - list_for_each_entry(rule, &mcam->mcam_rules, list) { + list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { if (is_npc_intf_rx(rule->intf) && rule->rx_action.pf_func == pcifunc) { npc_enable_mcam_entry(rvu, mcam, blkaddr, rule->entry, false); rule->enable = false; /* Indicate that default rule is disabled */ - if (rule->default_rule) + if (rule->default_rule) { pfvf->def_ucast_rule = NULL; + list_del(&rule->list); + kfree(rule); + } } } @@ -1674,6 +1698,9 @@ void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, static int npc_mcam_verify_entry(struct npc_mcam *mcam, u16 pcifunc, int entry) { + /* verify AF installed entries */ + if (is_pffunc_af(pcifunc)) + return 0; /* Verify if entry is valid and if it is indeed * allocated to the requesting PFFUNC. 
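* AF-installed entries (pcifunc == 0) are exempted by the is_pffunc_af() check above.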
*/ @@ -2268,6 +2295,10 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, goto exit; } + /* For AF installed rules, the nix_intf should be set to target NIX */ + if (is_pffunc_af(req->hdr.pcifunc)) + nix_intf = req->intf; + npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, &req->entry_data, req->enable_entry); @@ -2730,30 +2761,6 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } -bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf, - u16 pcifunc, u8 intf, struct mcam_entry *entry, - int *index) -{ - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); - struct npc_mcam *mcam = &rvu->hw->mcam; - bool enable; - u8 nix_intf; - - if (is_npc_intf_tx(intf)) - nix_intf = pfvf->nix_tx_intf; - else - nix_intf = pfvf->nix_rx_intf; - - *index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - /* dont force enable unicast entry */ - enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index); - npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf, - entry, enable); - - return enable; -} - int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) @@ -2799,3 +2806,42 @@ read_entry: out: return rc; } + +int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, + struct npc_mcam_get_stats_req *req, + struct npc_mcam_get_stats_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 index, cntr; + int blkaddr; + u64 regval; + u32 bank; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + + index = req->entry & (mcam->banksize - 1); + bank = npc_get_bank(mcam, req->entry); + + /* read MCAM entry STAT_ACT register */ + regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); + + if (!(regval & BIT_ULL(9))) { + rsp->stat_ena = 0; + mutex_unlock(&mcam->lock); + return 0; + } + + cntr = regval & 0x1FF; + + rsp->stat_ena = 1; + rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); + rsp->stat &= BIT_ULL(48) - 1; + + mutex_unlock(&mcam->lock); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 4ba9d54ce4e3..7f35b62eea13 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -29,6 +29,8 @@ static const char * const npc_flow_names[] = { [NPC_IPPROTO_TCP] = "ip proto tcp", [NPC_IPPROTO_UDP] = "ip proto udp", [NPC_IPPROTO_SCTP] = "ip proto sctp", + [NPC_IPPROTO_ICMP] = "ip proto icmp", + [NPC_IPPROTO_ICMP6] = "ip proto icmp6", [NPC_IPPROTO_AH] = "ip proto AH", [NPC_IPPROTO_ESP] = "ip proto ESP", [NPC_SPORT_TCP] = "tcp source port", @@ -427,6 +429,7 @@ do { \ * packet header fields below. 
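* Offsets and sizes are in bytes within the named header; the NPC_TOS entry added below covers the one-byte TOS field at offset 1 of the IPv4 header.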
* Example: Source IP is 4 bytes and starts at 12th byte of IP header */ + NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1); NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4); NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4); NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); @@ -477,9 +480,12 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) BIT_ULL(NPC_IPPROTO_SCTP); } - /* for AH, check if corresponding layer type is present in the key */ - if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) + /* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) { *features |= BIT_ULL(NPC_IPPROTO_AH); + *features |= BIT_ULL(NPC_IPPROTO_ICMP); + *features |= BIT_ULL(NPC_IPPROTO_ICMP6); + } /* for ESP, check if corresponding layer type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LE, intf)) @@ -597,7 +603,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) dev_info(rvu->dev, "Unsupported flow(s):\n"); for_each_set_bit(bit, (unsigned long *)&unsupported, 64) dev_info(rvu->dev, "%s ", npc_get_field_name(bit)); - return -EOPNOTSUPP; + return NIX_AF_ERR_NPC_KEY_NOT_SUPP; } return 0; @@ -769,6 +775,12 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, if (features & BIT_ULL(NPC_IPPROTO_SCTP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, 0, ~0ULL, 0, intf); + if (features & BIT_ULL(NPC_IPPROTO_ICMP)) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP, + 0, ~0ULL, 0, intf); + if (features & BIT_ULL(NPC_IPPROTO_ICMP6)) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, + 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_OUTER_VID)) npc_update_entry(rvu, NPC_LB, entry, @@ -798,6 +810,7 @@ do { \ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, ntohs(mask->etype), 0); + NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0); NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0, ntohl(mask->ip4src), 0); NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0, @@ -903,9 +916,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, u16 target) { struct nix_rx_action action; + u64 chan_mask; - npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, - ~0ULL, 0, NIX_INTF_RX); + chan_mask = req->chan_mask ? 
req->chan_mask : ~0ULL; + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0, + NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; @@ -998,33 +1013,21 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, if (is_npc_intf_tx(req->intf)) goto find_rule; - if (def_ucast_rule) + if (req->default_rule) { + entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf, + NIXLF_UCAST_ENTRY); + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index); + } + + /* update mcam entry with default unicast rule attributes */ + if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) { missing_features = (def_ucast_rule->features ^ features) & def_ucast_rule->features; - - if (req->default_rule && req->append) { - /* add to default rule */ if (missing_features) npc_update_flow(rvu, entry, missing_features, &def_ucast_rule->packet, &def_ucast_rule->mask, &dummy, req->intf); - enable = rvu_npc_write_default_rule(rvu, blkaddr, - nixlf, target, - pfvf->nix_rx_intf, entry, - &entry_index); - installed_features = req->features | missing_features; - } else if (req->default_rule && !req->append) { - /* overwrite default rule */ - enable = rvu_npc_write_default_rule(rvu, blkaddr, - nixlf, target, - pfvf->nix_rx_intf, entry, - &entry_index); - } else if (msg_from_vf) { - /* normal rule - include default rule also to it for VF */ - npc_update_flow(rvu, entry, missing_features, - &def_ucast_rule->packet, &def_ucast_rule->mask, - &dummy, req->intf); installed_features = req->features | missing_features; } @@ -1036,12 +1039,9 @@ find_rule: return -ENOMEM; new = true; } - /* no counter for default rule */ - if (req->default_rule) - goto update_rule; /* allocate new counter if rule has no counter */ - if (req->set_cntr && !rule->has_cntr) + if (!req->default_rule && req->set_cntr && !rule->has_cntr) rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp); /* if user wants to delete an existing counter for a rule then @@ -1051,7 +1051,14 @@ find_rule: rvu_mcam_remove_counter_from_rule(rvu, owner, rule); write_req.hdr.pcifunc = owner; - write_req.entry = req->entry; + + /* AF owns the default rules so change the owner just to relax + * the checks in rvu_mbox_handler_npc_mcam_write_entry + */ + if (req->default_rule) + write_req.hdr.pcifunc = 0; + + write_req.entry = entry_index; write_req.intf = req->intf; write_req.enable_entry = (u8)enable; /* if counter is available then clear and use it */ @@ -1069,7 +1076,7 @@ find_rule: kfree(rule); return err; } -update_rule: + /* update rule */ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet)); memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask)); rule->entry = entry_index; @@ -1145,8 +1152,13 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, else target = req->hdr.pcifunc; - if (npc_check_unsupported_flows(rvu, req->features, req->intf)) - return -EOPNOTSUPP; + /* ignore chan_mask in case pf func is not AF, revisit later */ + if (!is_pffunc_af(req->hdr.pcifunc)) + req->chan_mask = 0xFFF; + + err = npc_check_unsupported_flows(rvu, req->features, req->intf); + if (err) + return err; if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) return -EINVAL; @@ -1278,6 +1290,7 @@ static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr, write_req.hdr.pcifunc = rule->owner; write_req.entry = rule->entry; + write_req.intf = pfvf->nix_rx_intf; mutex_unlock(&mcam->lock); err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp); diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 3e401fd8ac63..ac71c0f2f960 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -494,6 +494,27 @@ #define CPT_AF_RAS_INT_W1S (0x47028) #define CPT_AF_RAS_INT_ENA_W1S (0x47030) #define CPT_AF_RAS_INT_ENA_W1C (0x47038) +#define CPT_AF_CTX_FLUSH_TIMER (0x48000ull) +#define CPT_AF_CTX_ERR (0x48008ull) +#define CPT_AF_CTX_ENC_ID (0x48010ull) +#define CPT_AF_CTX_MIS_PC (0x49400ull) +#define CPT_AF_CTX_HIT_PC (0x49408ull) +#define CPT_AF_CTX_AOP_PC (0x49410ull) +#define CPT_AF_CTX_AOP_LATENCY_PC (0x49418ull) +#define CPT_AF_CTX_IFETCH_PC (0x49420ull) +#define CPT_AF_CTX_IFETCH_LATENCY_PC (0x49428ull) +#define CPT_AF_CTX_FFETCH_PC (0x49430ull) +#define CPT_AF_CTX_FFETCH_LATENCY_PC (0x49438ull) +#define CPT_AF_CTX_WBACK_PC (0x49440ull) +#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull) +#define CPT_AF_CTX_PSH_PC (0x49450ull) +#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull) +#define CPT_AF_RXC_TIME (0x50010ull) +#define CPT_AF_RXC_TIME_CFG (0x50018ull) +#define CPT_AF_RXC_DFRG (0x50020ull) +#define CPT_AF_RXC_ACTIVE_STS (0x50028ull) +#define CPT_AF_RXC_ZOMBIE_STS (0x50030ull) +#define CPT_AF_X2PX_LINK_CFG(a) (0x51000ull | (u64)(a) << 3) #define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b)) #define CPT_AF_BAR2_SEL 0x9000000 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 745aa8a19499..457c94793e63 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index a518c2283f18..45730d0d92f2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -18,6 +18,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> #include <linux/soc/marvell/octeontx2/asm.h> +#include <net/pkt_cls.h> #include <mbox.h> #include <npc.h> @@ -264,6 +265,7 @@ struct otx2_flow_config { #define OTX2_MAX_NTUPLE_FLOWS 32 #define OTX2_MAX_UNICAST_FLOWS 8 #define OTX2_MAX_VLAN_FLOWS 1 +#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS #define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \ OTX2_MAX_UNICAST_FLOWS + \ OTX2_MAX_VLAN_FLOWS) @@ -274,10 +276,20 @@ struct otx2_flow_config { #define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */ #define OTX2_VF_VLAN_RX_INDEX 0 #define OTX2_VF_VLAN_TX_INDEX 1 + u32 tc_flower_offset; u32 ntuple_max_flows; + u32 tc_max_flows; struct list_head flow_list; }; +struct otx2_tc_info { + /* hash table to store TC offloaded flows */ + struct rhashtable flow_table; + struct rhashtable_params flow_ht_params; + DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS); + unsigned long num_entries; +}; + struct dev_hw_ops { int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, @@ -305,6 +317,8 @@ struct otx2_nic { #define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8) #define 
OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9) #define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10) +#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11) +#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) u64 flags; struct otx2_qset qset; @@ -347,6 +361,7 @@ struct otx2_nic { struct hwtstamp_config tstamp; struct otx2_flow_config *flow_cfg; + struct otx2_tc_info tc_info; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -802,4 +817,9 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); +/* tc support */ +int otx2_init_tc(struct otx2_nic *nic); +void otx2_shutdown_tc(struct otx2_nic *nic); +int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index dc1778420978..0b4fa92ba821 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -57,10 +57,13 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) flow_cfg->ntuple_max_flows = rsp->count; flow_cfg->ntuple_offset = 0; pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows; + pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; } else { flow_cfg->vf_vlan_offset = 0; flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset + vf_vlan_max_flows; + flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset; flow_cfg->unicast_offset = flow_cfg->ntuple_offset + OTX2_MAX_NTUPLE_FLOWS; flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + @@ -69,6 +72,7 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; + pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; } for (i = 0; i < rsp->count; i++) @@ -93,6 +97,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS; + pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows; err = otx2_alloc_mcam_entries(pf); if (err) @@ -303,6 +308,35 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } + if (ipv4_usr_mask->tos) { + pkt->tos = ipv4_usr_hdr->tos; + pmask->tos = ipv4_usr_mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } + if (ipv4_usr_mask->proto) { + switch (ipv4_usr_hdr->proto) { + case IPPROTO_ICMP: + req->features |= BIT_ULL(NPC_IPPROTO_ICMP); + break; + case IPPROTO_TCP: + req->features |= BIT_ULL(NPC_IPPROTO_TCP); + break; + case IPPROTO_UDP: + req->features |= BIT_ULL(NPC_IPPROTO_UDP); + break; + case IPPROTO_SCTP: + req->features |= BIT_ULL(NPC_IPPROTO_SCTP); + break; + case IPPROTO_AH: + req->features |= BIT_ULL(NPC_IPPROTO_AH); + break; + case IPPROTO_ESP: + req->features |= BIT_ULL(NPC_IPPROTO_ESP); + break; + default: + return -EOPNOTSUPP; + } + } pkt->etype = cpu_to_be16(ETH_P_IP); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); @@ -327,6 +361,11 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } + if (ipv4_l4_mask->tos) { + pkt->tos = ipv4_l4_hdr->tos; + pmask->tos = ipv4_l4_mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } if 
(ipv4_l4_mask->psrc) { memcpy(&pkt->sport, &ipv4_l4_hdr->psrc, sizeof(pkt->sport)); @@ -377,10 +416,14 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } + if (ah_esp_mask->tos) { + pkt->tos = ah_esp_hdr->tos; + pmask->tos = ah_esp_mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } /* NPC profile doesn't extract AH/ESP header fields */ - if ((ah_esp_mask->spi & ah_esp_hdr->spi) || - (ah_esp_mask->tos & ah_esp_mask->tos)) + if (ah_esp_mask->spi & ah_esp_hdr->spi) return -EOPNOTSUPP; if (flow_type == AH_V4_FLOW) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 2fd3d235d292..03004fdac0c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1765,6 +1765,24 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } +static netdev_features_t otx2_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* check if n-tuple filters are ON */ + if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) { + netdev_info(dev, "Disabling n-tuple filters\n"); + features &= ~NETIF_F_NTUPLE; + } + + /* check if tc hw offload is ON */ + if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) { + netdev_info(dev, "Disabling TC hardware offload\n"); + features &= ~NETIF_F_HW_TC; + } + + return features; +} + static void otx2_set_rx_mode(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1827,6 +1845,12 @@ static int otx2_set_features(struct net_device *netdev, if ((changed & NETIF_F_NTUPLE) && !ntuple) otx2_destroy_ntuple_flows(pf); + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + pf->tc_info.num_entries) { + netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); + return -EBUSY; + } + return 0; } @@ -2225,6 +2249,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_open = otx2_open, .ndo_stop = otx2_stop, .ndo_start_xmit = otx2_xmit, + .ndo_fix_features = otx2_fix_features, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2_change_mtu, .ndo_set_rx_mode = otx2_set_rx_mode, @@ -2235,6 +2260,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, + .ndo_setup_tc = otx2_setup_tc, }; static int otx2_wq_init(struct otx2_nic *pf) @@ -2454,6 +2480,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_HW_VLAN_STAG_RX; netdev->features |= netdev->hw_features; + /* HW supports tc offload but mutually exclusive with n-tuple filters */ + if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) + netdev->hw_features |= NETIF_F_HW_TC; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; @@ -2475,6 +2505,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) otx2_set_ethtool_ops(netdev); + err = otx2_init_tc(pf); + if (err) + goto err_mcam_flow_del; + /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); @@ -2484,6 +2518,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +err_mcam_flow_del: + otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); err_del_mcam_entries: @@ -2651,6 +2687,7 @@ static void otx2_remove(struct pci_dev *pdev) 
otx2_ptp_destroy(pf); otx2_mcam_flow_del(pf); + otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); if (pf->hw.lmt_base) iounmap(pf->hw.lmt_base); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index 21b811c6ee0f..f4fd72ee9a25 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -152,6 +152,7 @@ #define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) #define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) #define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) #define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) #define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) #define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c new file mode 100644 index 000000000000..51157b283f6f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physcial Function ethernet driver + * + * Copyright (C) 2021 Marvell. + */ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/inetdevice.h> +#include <linux/rhashtable.h> +#include <linux/bitfield.h> +#include <net/flow_dissector.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_vlan.h> +#include <net/ipv6.h> + +#include "otx2_common.h" + +/* Egress rate limiting definitions */ +#define MAX_BURST_EXPONENT 0x0FULL +#define MAX_BURST_MANTISSA 0xFFULL +#define MAX_BURST_SIZE 130816ULL +#define MAX_RATE_DIVIDER_EXPONENT 12ULL +#define MAX_RATE_EXPONENT 0x0FULL +#define MAX_RATE_MANTISSA 0xFFULL + +/* Bitfields in NIX_TLX_PIR register */ +#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1) +#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9) +#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13) +#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29) +#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37) + +struct otx2_tc_flow_stats { + u64 bytes; + u64 pkts; + u64 used; +}; + +struct otx2_tc_flow { + struct rhash_head node; + unsigned long cookie; + u16 entry; + unsigned int bitpos; + struct rcu_head rcu; + struct otx2_tc_flow_stats stats; + spinlock_t lock; /* lock for stats */ +}; + +static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, + u32 *burst_mantissa) +{ + unsigned int tmp; + + /* Burst is calculated as + * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 + * Max supported burst size is 130,816 bytes. + */ + burst = min_t(u32, burst, MAX_BURST_SIZE); + if (burst) { + *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; + tmp = burst - rounddown_pow_of_two(burst); + if (burst < MAX_BURST_MANTISSA) + *burst_mantissa = tmp * 2; + else + *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); + } else { + *burst_exp = MAX_BURST_EXPONENT; + *burst_mantissa = MAX_BURST_MANTISSA; + } +} + +static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, + u32 *mantissa, u32 *div_exp) +{ + unsigned int tmp; + + /* Rate calculation by hardware + * + * PIR_ADD = ((256 + mantissa) << exp) / 256 + * rate = (2 * PIR_ADD) / ( 1 << div_exp) + * The resultant rate is in Mbps. + */ + + /* 2Mbps to 100Gbps can be expressed with div_exp = 0. + * Setting this to '0' will ease the calculation of + * exponent and mantissa. + */ + *div_exp = 0; + + if (maxrate) { + *exp = ilog2(maxrate) ? 
ilog2(maxrate) - 1 : 0; + tmp = maxrate - rounddown_pow_of_two(maxrate); + if (maxrate < MAX_RATE_MANTISSA) + *mantissa = tmp * 2; + else + *mantissa = tmp / (1ULL << (*exp - 7)); + } else { + /* Instead of disabling rate limiting, set all values to max */ + *exp = MAX_RATE_EXPONENT; + *mantissa = MAX_RATE_MANTISSA; + } +} + +static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate) +{ + struct otx2_hw *hw = &nic->hw; + struct nix_txschq_config *req; + u32 burst_exp, burst_mantissa; + u32 exp, mantissa, div_exp; + int txschq, err; + + /* All SQs share the same TL4, so pick the first scheduler */ + txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; + + /* Get exponent and mantissa values from the desired rate */ + otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa); + otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); + + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + req->lvl = NIX_TXSCH_LVL_TL4; + req->num_regs = 1; + req->reg[0] = NIX_AF_TL4X_PIR(txschq); + req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) | + FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) | + FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | + FIELD_PREP(TLX_RATE_EXPONENT, exp) | + FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + + err = otx2_sync_mbox_msg(&nic->mbox); + mutex_unlock(&nic->mbox.lock); + return err; +} + +static int otx2_tc_validate_flow(struct otx2_nic *nic, + struct flow_action *actions, + struct netlink_ext_ack *extack) +{ + if (nic->flags & OTX2_FLAG_INTF_DOWN) { + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); + return -EINVAL; + } + + if (!flow_action_has_entries(actions)) { + NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); + return -EINVAL; + } + + if (!flow_offload_has_one_action(actions)) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload supports only 1 policing action"); + return -EINVAL; + } + return 0; +} + +static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action *actions = &cls->rule->action; + struct flow_action_entry *entry; + u32 rate; + int err; + + err = otx2_tc_validate_flow(nic, actions, extack); + if (err) + return err; + + if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { + NL_SET_ERR_MSG_MOD(extack, + "Only one Egress MATCHALL ratelimiter can be offloaded"); + return -ENOMEM; + } + + entry = &cls->rule->action.entries[0]; + switch (entry->id) { + case FLOW_ACTION_POLICE: + if (entry->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + /* Convert bytes per second to Mbps */ + rate = entry->police.rate_bytes_ps * 8; + rate = max_t(u32, rate / 1000000, 1); + err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate); + if (err) + return err; + nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Only police action is supported with Egress MATCHALL offload"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + int err; + + if (nic->flags & OTX2_FLAG_INTF_DOWN) { + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); + return -EINVAL; + } + + err = 
otx2_set_matchall_egress_rate(nic, 0, 0); + nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; + return err; +} + +static int otx2_tc_parse_actions(struct otx2_nic *nic, + struct flow_action *flow_action, + struct npc_install_flow_req *req) +{ + struct flow_action_entry *act; + struct net_device *target; + struct otx2_nic *priv; + int i; + + if (!flow_action_has_entries(flow_action)) { + netdev_info(nic->netdev, "no tc actions specified"); + return -EINVAL; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + req->op = NIX_RX_ACTIONOP_DROP; + return 0; + case FLOW_ACTION_ACCEPT: + req->op = NIX_RX_ACTION_DEFAULT; + return 0; + case FLOW_ACTION_REDIRECT_INGRESS: + target = act->dev; + priv = netdev_priv(target); + /* npc_install_flow_req doesn't support passing a target pcifunc */ + if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + netdev_info(nic->netdev, + "can't redirect to other pf/vf\n"); + return -EOPNOTSUPP; + } + req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; + req->op = NIX_RX_ACTION_DEFAULT; + return 0; + case FLOW_ACTION_VLAN_POP: + req->vtag0_valid = true; + /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ + req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int otx2_tc_prepare_flow(struct otx2_nic *nic, + struct flow_cls_offload *f, + struct npc_install_flow_req *req) +{ + struct flow_msg *flow_spec = &req->packet; + struct flow_msg *flow_mask = &req->mask; + struct flow_dissector *dissector; + struct flow_rule *rule; + u8 ip_proto = 0; + + rule = flow_cls_offload_flow_rule(f); + dissector = rule->match.dissector; + + if ((dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP)))) { + netdev_info(nic->netdev, "unsupported flow used key 0x%x", + dissector->used_keys); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + + /* All EtherTypes can be matched, no hw limitation */ + flow_spec->etype = match.key->n_proto; + flow_mask->etype = match.mask->n_proto; + req->features |= BIT_ULL(NPC_ETYPE); + + if (match.mask->ip_proto && + (match.key->ip_proto != IPPROTO_TCP && + match.key->ip_proto != IPPROTO_UDP && + match.key->ip_proto != IPPROTO_SCTP && + match.key->ip_proto != IPPROTO_ICMP && + match.key->ip_proto != IPPROTO_ICMPV6)) { + netdev_info(nic->netdev, + "ip_proto=0x%x not supported\n", + match.key->ip_proto); + return -EOPNOTSUPP; + } + if (match.mask->ip_proto) + ip_proto = match.key->ip_proto; + + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_IPPROTO_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_IPPROTO_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_IPPROTO_SCTP); + else if (ip_proto == IPPROTO_ICMP) + req->features |= BIT_ULL(NPC_IPPROTO_ICMP); + else if (ip_proto == IPPROTO_ICMPV6) + req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->src)) { + netdev_err(nic->netdev, "src mac match not supported\n"); + return -EOPNOTSUPP; + 
} + + if (!is_zero_ether_addr(match.mask->dst)) { + ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); + ether_addr_copy(flow_mask->dmac, + (u8 *)&match.mask->dst); + req->features |= BIT_ULL(NPC_DMAC); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + if ((ntohs(flow_spec->etype) != ETH_P_IP) && + match.mask->tos) { + netdev_err(nic->netdev, "tos not supported\n"); + return -EOPNOTSUPP; + } + if (match.mask->ttl) { + netdev_err(nic->netdev, "ttl not supported\n"); + return -EOPNOTSUPP; + } + flow_spec->tos = match.key->tos; + flow_mask->tos = match.mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + u16 vlan_tci, vlan_tci_mask; + + flow_rule_match_vlan(rule, &match); + + if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { + netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", + ntohs(match.key->vlan_tpid)); + return -EOPNOTSUPP; + } + + if (match.mask->vlan_id || + match.mask->vlan_dei || + match.mask->vlan_priority) { + vlan_tci = match.key->vlan_id | + match.key->vlan_dei << 12 | + match.key->vlan_priority << 13; + + vlan_tci_mask = match.mask->vlan_id | + match.key->vlan_dei << 12 | + match.key->vlan_priority << 13; + + flow_spec->vlan_tci = htons(vlan_tci); + flow_mask->vlan_tci = htons(vlan_tci_mask); + req->features |= BIT_ULL(NPC_OUTER_VID); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + + flow_spec->ip4dst = match.key->dst; + flow_mask->ip4dst = match.mask->dst; + req->features |= BIT_ULL(NPC_DIP_IPV4); + + flow_spec->ip4src = match.key->src; + flow_mask->ip4src = match.mask->src; + req->features |= BIT_ULL(NPC_SIP_IPV4); + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + + if (ipv6_addr_loopback(&match.key->dst) || + ipv6_addr_loopback(&match.key->src)) { + netdev_err(nic->netdev, + "Flow matching on IPv6 loopback addr is not supported\n"); + return -EOPNOTSUPP; + } + + if (!ipv6_addr_any(&match.mask->dst)) { + memcpy(&flow_spec->ip6dst, + (struct in6_addr *)&match.key->dst, + sizeof(flow_spec->ip6dst)); + memcpy(&flow_mask->ip6dst, + (struct in6_addr *)&match.mask->dst, + sizeof(flow_spec->ip6dst)); + req->features |= BIT_ULL(NPC_DIP_IPV6); + } + + if (!ipv6_addr_any(&match.mask->src)) { + memcpy(&flow_spec->ip6src, + (struct in6_addr *)&match.key->src, + sizeof(flow_spec->ip6src)); + memcpy(&flow_mask->ip6src, + (struct in6_addr *)&match.mask->src, + sizeof(flow_spec->ip6src)); + req->features |= BIT_ULL(NPC_SIP_IPV6); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + + flow_spec->dport = match.key->dst; + flow_mask->dport = match.mask->dst; + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_DPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_DPORT_SCTP); + + flow_spec->sport = match.key->src; + flow_mask->sport = match.mask->src; + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_SPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_SPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= 
BIT_ULL(NPC_SPORT_SCTP); + } + + return otx2_tc_parse_actions(nic, &rule->action, req); +} + +static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) +{ + struct npc_delete_flow_req *req; + int err; + + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + req->entry = entry; + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&nic->mbox); + if (err) { + netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", + entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + mutex_unlock(&nic->mbox.lock); + + return 0; +} + +static int otx2_tc_del_flow(struct otx2_nic *nic, + struct flow_cls_offload *tc_flow_cmd) +{ + struct otx2_tc_info *tc_info = &nic->tc_info; + struct otx2_tc_flow *flow_node; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", + tc_flow_cmd->cookie); + return -EINVAL; + } + + otx2_del_mcam_flow_entry(nic, flow_node->entry); + + WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, + &flow_node->node, + nic->tc_info.flow_ht_params)); + kfree_rcu(flow_node, rcu); + + clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); + tc_info->num_entries--; + + return 0; +} + +static int otx2_tc_add_flow(struct otx2_nic *nic, + struct flow_cls_offload *tc_flow_cmd) +{ + struct otx2_tc_info *tc_info = &nic->tc_info; + struct otx2_tc_flow *new_node, *old_node; + struct npc_install_flow_req *req; + int rc; + + if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) + return -ENOMEM; + + /* allocate memory for the new flow and it's node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) + return -ENOMEM; + spin_lock_init(&new_node->lock); + new_node->cookie = tc_flow_cmd->cookie; + + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req); + if (rc) { + otx2_mbox_reset(&nic->mbox.mbox, 0); + mutex_unlock(&nic->mbox.lock); + return rc; + } + + /* If a flow exists with the same cookie, delete it */ + old_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (old_node) + otx2_tc_del_flow(nic, tc_flow_cmd); + + if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) { + netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n"); + otx2_mbox_reset(&nic->mbox.mbox, 0); + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, + nic->flow_cfg->tc_max_flows); + req->channel = nic->hw.rx_chan_base; + req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset + + nic->flow_cfg->tc_max_flows - new_node->bitpos]; + req->intf = NIX_INTF_RX; + req->set_cntr = 1; + new_node->entry = req->entry; + + /* Send message to AF */ + rc = otx2_sync_mbox_msg(&nic->mbox); + if (rc) { + netdev_err(nic->netdev, "Failed to install MCAM flow entry\n"); + mutex_unlock(&nic->mbox.lock); + goto out; + } + mutex_unlock(&nic->mbox.lock); + + /* add new flow to flow-table */ + rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, + nic->tc_info.flow_ht_params); + if (rc) { + otx2_del_mcam_flow_entry(nic, req->entry); + kfree_rcu(new_node, rcu); + goto out; + } + + set_bit(new_node->bitpos, 
tc_info->tc_entries_bitmap); + tc_info->num_entries++; +out: + return rc; +} + +static int otx2_tc_get_flow_stats(struct otx2_nic *nic, + struct flow_cls_offload *tc_flow_cmd) +{ + struct otx2_tc_info *tc_info = &nic->tc_info; + struct npc_mcam_get_stats_req *req; + struct npc_mcam_get_stats_rsp *rsp; + struct otx2_tc_flow_stats *stats; + struct otx2_tc_flow *flow_node; + int err; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_info(nic->netdev, "tc flow not found for cookie %lx", + tc_flow_cmd->cookie); + return -EINVAL; + } + + mutex_lock(&nic->mbox.lock); + + req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + req->entry = flow_node->entry; + + err = otx2_sync_mbox_msg(&nic->mbox); + if (err) { + netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", + req->entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + + rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp + (&nic->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&nic->mbox.lock); + return PTR_ERR(rsp); + } + + mutex_unlock(&nic->mbox.lock); + + if (!rsp->stat_ena) + return -EINVAL; + + stats = &flow_node->stats; + + spin_lock(&flow_node->lock); + flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, + FLOW_ACTION_HW_STATS_IMMEDIATE); + stats->pkts = rsp->stat; + spin_unlock(&flow_node->lock); + + return 0; +} + +static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return otx2_tc_add_flow(nic, cls_flower); + case FLOW_CLS_DESTROY: + return otx2_tc_del_flow(nic, cls_flower); + case FLOW_CLS_STATS: + return otx2_tc_get_flow_stats(nic, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct otx2_nic *nic = cb_priv; + + if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return otx2_setup_tc_cls_flower(nic, type_data); + default: + break; + } + + return -EOPNOTSUPP; +} + +static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, + struct tc_cls_matchall_offload *cls_matchall) +{ + switch (cls_matchall->command) { + case TC_CLSMATCHALL_REPLACE: + return otx2_tc_egress_matchall_install(nic, cls_matchall); + case TC_CLSMATCHALL_DESTROY: + return otx2_tc_egress_matchall_delete(nic, cls_matchall); + case TC_CLSMATCHALL_STATS: + default: + break; + } + + return -EOPNOTSUPP; +} + +static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct otx2_nic *nic = cb_priv; + + if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return otx2_setup_tc_egress_matchall(nic, type_data); + default: + break; + } + + return -EOPNOTSUPP; +} + +static LIST_HEAD(otx2_block_cb_list); + +static int otx2_setup_tc_block(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct otx2_nic *nic = netdev_priv(netdev); + flow_setup_cb_t *cb; + bool ingress; + + if (f->block_shared) + return -EOPNOTSUPP; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = otx2_setup_tc_block_ingress_cb; + ingress = true; + } else if (f->binder_type == 
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = otx2_setup_tc_block_egress_cb; + ingress = false; + } else { + return -EOPNOTSUPP; + } + + return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, + nic, nic, ingress); +} + +int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return otx2_setup_tc_block(netdev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static const struct rhashtable_params tc_flow_ht_params = { + .head_offset = offsetof(struct otx2_tc_flow, node), + .key_offset = offsetof(struct otx2_tc_flow, cookie), + .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +int otx2_init_tc(struct otx2_nic *nic) +{ + struct otx2_tc_info *tc = &nic->tc_info; + + tc->flow_ht_params = tc_flow_ht_params; + return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); +} + +void otx2_shutdown_tc(struct otx2_nic *nic) +{ + struct otx2_tc_info *tc = &nic->tc_info; + + rhashtable_destroy(&tc->flow_table); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 25dd903a3e92..2768c78528a5 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw, netif_carrier_on(port->dev); if (!delayed_work_pending(caching_dw)) queue_delayed_work(prestera_wq, caching_dw, 0); - } else { + } else if (netif_running(port->dev) && + netif_carrier_ok(port->dev)) { netif_carrier_off(port->dev); if (delayed_work_pending(caching_dw)) cancel_delayed_work(caching_dw); @@ -456,20 +457,17 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) { struct device_node *base_mac_np; struct device_node *np; - const char *base_mac; + int ret; np = of_find_compatible_node(NULL, NULL, "marvell,prestera"); base_mac_np = of_parse_phandle(np, "base-mac-provider", 0); - base_mac = of_get_mac_address(base_mac_np); - of_node_put(base_mac_np); - if (!IS_ERR(base_mac)) - ether_addr_copy(sw->base_mac, base_mac); - - if (!is_valid_ether_addr(sw->base_mac)) { + ret = of_get_mac_address(base_mac_np, sw->base_mac); + if (ret) { eth_random_addr(sw->base_mac); dev_info(prestera_dev(sw), "using random base mac address\n"); } + of_node_put(base_mac_np); return prestera_hw_switch_mac_set(sw, sw->base_mac); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index be5677623455..298110119272 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -756,6 +756,7 @@ static void prestera_pci_remove(struct pci_dev *pdev) static const struct pci_device_id prestera_pci_devices[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) }, { } }; MODULE_DEVICE_TABLE(pci, prestera_pci_devices); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 49e052273f30..cb564890a3dc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -798,7 +798,7 @@ static void prestera_fdb_event_work(struct work_struct *work) switch (swdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &swdev_work->fdb_info; - if (!fdb_info->added_by_user) + if 
(!fdb_info->added_by_user || fdb_info->is_local) break; err = prestera_port_fdb_set(port, fdb_info, true); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3712e1786091..e967867828d8 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1392,7 +1392,6 @@ static int pxa168_eth_probe(struct platform_device *pdev) struct resource *res; struct clk *clk; struct device_node *np; - const unsigned char *mac_addr = NULL; int err; printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); @@ -1435,12 +1434,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); - if (pdev->dev.of_node) - mac_addr = of_get_mac_address(pdev->dev.of_node); - - if (!IS_ERR_OR_NULL(mac_addr)) { - ether_addr_copy(dev->dev_addr, mac_addr); - } else { + err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr); + if (err) { /* try reading the mac address, if set by the bootloader */ pxa168_eth_get_mac_address(dev, dev->dev_addr); if (!is_valid_ether_addr(dev->dev_addr)) { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 8a9c0f490bfb..d4bb27ba1419 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -1617,7 +1617,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port) xm_write16(hw, port, XM_TX_THR, 512); /* - * Enable the reception of all error frames. This is is + * Enable the reception of all error frames. This is * a necessary evil due to the design of the XMAC. The * XMAC's receive FIFO is only 8K in size, however jumbo * frames can be up to 9000 bytes in length. When bad @@ -2959,8 +2959,9 @@ static void genesis_set_multicast(struct net_device *dev) static void yukon_add_filter(u8 filter[8], const u8 *addr) { - u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; - filter[bit/8] |= 1 << (bit%8); + u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; + + filter[bit / 8] |= 1 << (bit % 8); } static void yukon_set_multicast(struct net_device *dev) @@ -3849,7 +3850,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, /* Only used for Genesis XMAC */ if (is_genesis(hw)) - timer_setup(&skge->link_timer, xm_link_timer, 0); + timer_setup(&skge->link_timer, xm_link_timer, 0); else { dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index dbec8e187a68..222c32367b2c 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -55,7 +55,8 @@ #define RX_DEF_PENDING RX_MAX_PENDING /* This is the worst case number of transmit list elements for a single skb: - VLAN:GSO + CKSUM + Data + skb_frags * DMA */ + * VLAN:GSO + CKSUM + Data + skb_frags * DMA + */ #define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) #define TX_MIN_PENDING (MAX_SKB_TX_LE+1) #define TX_MAX_PENDING 1024 @@ -1529,7 +1530,8 @@ static void sky2_rx_start(struct sky2_port *sky2) sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); /* These chips have no ram buffer? 
- * MAC Rx RAM Read is controlled by hardware */ + * MAC Rx RAM Read is controlled by hardware + */ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev > CHIP_REV_YU_EC_U_A0) sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); @@ -4135,7 +4137,7 @@ static int sky2_set_coalesce(struct net_device *dev, /* * Hardware is limited to min of 128 and max of 2048 for ring size * and rounded up to next power of two - * to avoid division in modulus calclation + * to avoid division in modulus calculation */ static unsigned long roundup_ring_size(unsigned long pending) { @@ -4684,7 +4686,8 @@ static __exit void sky2_debug_cleanup(void) #endif /* Two copies of network device operations to handle special case of - not allowing netpoll on second port */ + * not allowing netpoll on second port + */ static const struct net_device_ops sky2_netdev_ops[2] = { { .ndo_open = sky2_open, @@ -4725,7 +4728,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, { struct sky2_port *sky2; struct net_device *dev = alloc_etherdev(sizeof(*sky2)); - const void *iap; + int ret; if (!dev) return NULL; @@ -4795,10 +4798,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, * 1) from device tree data * 2) from internal registers set by bootloader */ - iap = of_get_mac_address(hw->pdev->dev.of_node); - if (!IS_ERR(iap)) - ether_addr_copy(dev->dev_addr, iap); - else + ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr); + if (ret) memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 3362b148de23..c357c193378e 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -9,7 +9,9 @@ if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC tristate "MediaTek SoC Gigabit Ethernet support" + depends on NET_DSA || !NET_DSA select PHYLINK + select DIMLIB help This driver supports the gigabit ethernet MACs in the MediaTek SoC family. 
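The new DIMLIB selection above is needed because the mtk_eth_soc changes further below move RX/TX interrupt moderation to the generic Net DIM library. As a rough sketch of that consumer pattern (my_eth, my_rx_poll_done and my_dim_rx_work are illustrative placeholders, not the driver's symbols; the real wiring is in mtk_dim_rx()/mtk_dim_tx() below), a driver counts interrupt events and completed packets, feeds them to net_dim() from its NAPI poll, and applies whatever moderation profile DIM picks from a deferred work handler:

#include <linux/dim.h>
#include <linux/workqueue.h>

struct my_eth {
	struct dim rx_dim;			/* per-direction DIM context */
	u32 rx_events, rx_packets, rx_bytes;	/* cumulative counters */
};

/* NAPI poll path: hand the accumulated counters to DIM */
static void my_rx_poll_done(struct my_eth *eth, int pkts, int bytes)
{
	struct dim_sample sample = {};

	eth->rx_packets += pkts;
	eth->rx_bytes += bytes;
	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
			  &sample);
	net_dim(&eth->rx_dim, sample);
}

/* Deferred work: program the moderation profile DIM selected */
static void my_dim_rx_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct my_eth *eth = container_of(dim, struct my_eth, rx_dim);
	struct dim_cq_moder m;

	m = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	/* write m.usec / m.pkts into the hardware delay registers here */
	(void)eth;
	dim->state = DIM_START_MEASURE;
}

static void my_dim_init(struct my_eth *eth)
{
	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->rx_dim.work, my_dim_rx_work);
}

The interrupt handler is expected to bump the event counter before scheduling NAPI, which is exactly what the mtk_handle_irq_rx()/mtk_handle_irq_tx() hunks below add.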
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 3a777b4a6cd3..79d4cdbbcbf5 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -4,5 +4,5 @@ # obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o -mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 01d3ee4b5829..ed4eacef17ce 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -19,6 +19,8 @@ #include <linux/interrupt.h> #include <linux/pinctrl/devinfo.h> #include <linux/phylink.h> +#include <linux/jhash.h> +#include <net/dsa.h> #include "mtk_eth_soc.h" @@ -85,7 +87,7 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth) return 0; if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) break; - usleep_range(10, 20); + cond_resched(); } dev_err(eth->dev, "mdio: MDIO timeout\n"); @@ -776,13 +778,18 @@ static inline int mtk_max_buf_size(int frag_size) return buf_size; } -static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, +static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, struct mtk_rx_dma *dma_rxd) { - rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); + if (!(rxd->rxd2 & RX_DMA_DONE)) + return false; + + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); + + return true; } /* the qdma core needs scratch memory to be setup */ @@ -857,7 +864,8 @@ static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) return ((void *)dma - (void *)ring->dma) / sizeof(*dma); } -static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) +static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + bool napi) { if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { @@ -889,8 +897,12 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) tx_buf->flags = 0; if (tx_buf->skb && - (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) - dev_kfree_skb_any(tx_buf->skb); + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) { + if (napi) + napi_consume_skb(tx_buf->skb, napi); + else + dev_kfree_skb_any(tx_buf->skb); + } tx_buf->skb = NULL; } @@ -1068,7 +1080,7 @@ err_dma: tx_buf = mtk_desc_to_tx_buf(ring, itxd); /* unmap dma */ - mtk_tx_unmap(eth, tx_buf); + mtk_tx_unmap(eth, tx_buf, false); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) @@ -1125,17 +1137,6 @@ static void mtk_wake_queue(struct mtk_eth *eth) } } -static void mtk_stop_queue(struct mtk_eth *eth) -{ - int i; - - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->netdev[i]) - continue; - netif_stop_queue(eth->netdev[i]); - } -} - static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@ -1156,7 +1157,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { - mtk_stop_queue(eth); + netif_stop_queue(dev); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); spin_unlock(ð->page_lock); @@ -1182,7 +1183,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct 
net_device *dev) goto drop; if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) - mtk_stop_queue(eth); + netif_stop_queue(dev); spin_unlock(ð->page_lock); @@ -1238,17 +1239,19 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) static int mtk_poll_rx(struct napi_struct *napi, int budget, struct mtk_eth *eth) { + struct dim_sample dim_sample = {}; struct mtk_rx_ring *ring; int idx; struct sk_buff *skb; u8 *data, *new_data; struct mtk_rx_dma *rxd, trxd; - int done = 0; + int done = 0, bytes = 0; while (done < budget) { struct net_device *netdev; unsigned int pktlen; dma_addr_t dma_addr; + u32 hash; int mac; ring = mtk_get_rx_ring(eth); @@ -1259,18 +1262,16 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, rxd = &ring->dma[idx]; data = ring->data[idx]; - mtk_rx_get_desc(&trxd, rxd); - if (!(trxd.rxd2 & RX_DMA_DONE)) + if (!mtk_rx_get_desc(&trxd, rxd)) break; /* find out which mac the packet come from. values start at 1 */ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) || + (trxd.rxd4 & RX_DMA_SPECIAL_TAG)) mac = 0; - } else { - mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & - RX_DMA_FPORT_MASK; - mac--; - } + else + mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & + RX_DMA_FPORT_MASK) - 1; if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || !eth->netdev[mac])) @@ -1298,17 +1299,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; } + dma_unmap_single(eth->dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + /* receive data */ skb = build_skb(data, ring->frag_size); if (unlikely(!skb)) { - skb_free_frag(new_data); + skb_free_frag(data); netdev->stats.rx_dropped++; - goto release_desc; + goto skip_rx; } skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - dma_unmap_single(eth->dev, trxd.rxd1, - ring->buf_size, DMA_FROM_DEVICE); pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; skb_put(skb, pktlen); @@ -1317,14 +1319,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, else skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev); + bytes += pktlen; + + hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; + if (hash != MTK_RXD4_FOE_ENTRY) { + hash = jhash_1word(hash, 0); + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + } if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && - RX_DMA_VID(trxd.rxd3)) + (trxd.rxd2 & RX_DMA_VTAG)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), RX_DMA_VID(trxd.rxd3)); skb_record_rx_queue(skb, 0); napi_gro_receive(napi, skb); +skip_rx: ring->data[idx] = new_data; rxd->rxd1 = (unsigned int)dma_addr; @@ -1348,6 +1358,12 @@ rx_done: mtk_update_rx_cpu_idx(eth); } + eth->rx_packets += done; + eth->rx_bytes += bytes; + dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, + &dim_sample); + net_dim(ð->rx_dim, dim_sample); + return done; } @@ -1360,7 +1376,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, struct mtk_tx_buf *tx_buf; u32 cpu, dma; - cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); + cpu = ring->last_free_ptr; dma = mtk_r32(eth, MTK_QTX_DRX_PTR); desc = mtk_qdma_phys_to_virt(ring, cpu); @@ -1386,7 +1402,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, done[mac]++; budget--; } - mtk_tx_unmap(eth, tx_buf); + mtk_tx_unmap(eth, tx_buf, true); ring->last_free = desc; atomic_inc(&ring->free_count); @@ -1394,6 +1410,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, cpu = next_cpu; } + ring->last_free_ptr = cpu; mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); return budget; @@ -1423,7 +1440,7 @@ static int 
mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, budget--; } - mtk_tx_unmap(eth, tx_buf); + mtk_tx_unmap(eth, tx_buf, true); desc = &ring->dma[cpu]; ring->last_free = desc; @@ -1440,6 +1457,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, static int mtk_poll_tx(struct mtk_eth *eth, int budget) { struct mtk_tx_ring *ring = ð->tx_ring; + struct dim_sample dim_sample = {}; unsigned int done[MTK_MAX_DEVS]; unsigned int bytes[MTK_MAX_DEVS]; int total = 0, i; @@ -1457,8 +1475,14 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) continue; netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); total += done[i]; + eth->tx_packets += done[i]; + eth->tx_bytes += bytes[i]; } + dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, + &dim_sample); + net_dim(ð->tx_dim, dim_sample); + if (mtk_queue_stopped(eth) && (atomic_read(&ring->free_count) > ring->thresh)) mtk_wake_queue(eth); @@ -1480,7 +1504,6 @@ static void mtk_handle_status_irq(struct mtk_eth *eth) static int mtk_napi_tx(struct napi_struct *napi, int budget) { struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); - u32 status, mask; int tx_done = 0; if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) @@ -1489,22 +1512,20 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) tx_done = mtk_poll_tx(eth, budget); if (unlikely(netif_msg_intr(eth))) { - status = mtk_r32(eth, eth->tx_int_status_reg); - mask = mtk_r32(eth, eth->tx_int_mask_reg); dev_info(eth->dev, - "done tx %d, intr 0x%08x/0x%x\n", - tx_done, status, mask); + "done tx %d, intr 0x%08x/0x%x\n", tx_done, + mtk_r32(eth, eth->tx_int_status_reg), + mtk_r32(eth, eth->tx_int_mask_reg)); } if (tx_done == budget) return budget; - status = mtk_r32(eth, eth->tx_int_status_reg); - if (status & MTK_TX_DONE_INT) + if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) return budget; - napi_complete(napi); - mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + if (napi_complete_done(napi, tx_done)) + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); return tx_done; } @@ -1512,35 +1533,33 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) static int mtk_napi_rx(struct napi_struct *napi, int budget) { struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); - u32 status, mask; - int rx_done = 0; - int remain_budget = budget; + int rx_done_total = 0; mtk_handle_status_irq(eth); -poll_again: - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); - rx_done = mtk_poll_rx(napi, remain_budget, eth); + do { + int rx_done; - if (unlikely(netif_msg_intr(eth))) { - status = mtk_r32(eth, MTK_PDMA_INT_STATUS); - mask = mtk_r32(eth, MTK_PDMA_INT_MASK); - dev_info(eth->dev, - "done rx %d, intr 0x%08x/0x%x\n", - rx_done, status, mask); - } - if (rx_done == remain_budget) - return budget; + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); + rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); + rx_done_total += rx_done; - status = mtk_r32(eth, MTK_PDMA_INT_STATUS); - if (status & MTK_RX_DONE_INT) { - remain_budget -= rx_done; - goto poll_again; - } - napi_complete(napi); - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + if (unlikely(netif_msg_intr(eth))) { + dev_info(eth->dev, + "done rx %d, intr 0x%08x/0x%x\n", rx_done, + mtk_r32(eth, MTK_PDMA_INT_STATUS), + mtk_r32(eth, MTK_PDMA_INT_MASK)); + } - return rx_done + budget - remain_budget; + if (rx_done_total == budget) + return budget; + + } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT); + + if (napi_complete_done(napi, rx_done_total)) + mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + + 
return rx_done_total; } static int mtk_tx_alloc(struct mtk_eth *eth) @@ -1587,6 +1606,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); ring->next_free = &ring->dma[0]; ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; + ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); ring->thresh = MAX_SKB_FRAGS; /* make sure that all changes to the dma ring are flushed before we @@ -1600,9 +1620,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) mtk_w32(eth, ring->phys + ((MTK_DMA_SIZE - 1) * sz), MTK_QTX_CRX_PTR); - mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), - MTK_QTX_DRX_PTR); + mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR); mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); } else { @@ -1625,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) if (ring->buf) { for (i = 0; i < MTK_DMA_SIZE; i++) - mtk_tx_unmap(eth, &ring->buf[i]); + mtk_tx_unmap(eth, &ring->buf[i], false); kfree(ring->buf); ring->buf = NULL; } @@ -2015,25 +2033,22 @@ static int mtk_set_features(struct net_device *dev, netdev_features_t features) /* wait for DMA to finish whatever it is doing before we start using it again */ static int mtk_dma_busy_wait(struct mtk_eth *eth) { - unsigned long t_start = jiffies; + unsigned int reg; + int ret; + u32 val; - while (1) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & - (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) - return 0; - } else { - if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) & - (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) - return 0; - } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + reg = MTK_QDMA_GLO_CFG; + else + reg = MTK_PDMA_GLO_CFG; - if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) - break; - } + ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, + !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)), + 5, MTK_DMA_BUSY_TIMEOUT_US); + if (ret) + dev_err(eth->dev, "DMA init timeout\n"); - dev_err(eth->dev, "DMA init timeout\n"); - return -1; + return ret; } static int mtk_dma_init(struct mtk_eth *eth) @@ -2133,6 +2148,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) { struct mtk_eth *eth = _eth; + eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { __napi_schedule(ð->rx_napi); mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); @@ -2145,6 +2161,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) { struct mtk_eth *eth = _eth; + eth->tx_events++; if (likely(napi_schedule_prep(ð->tx_napi))) { __napi_schedule(ð->tx_napi); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); @@ -2197,7 +2214,7 @@ static int mtk_start_dma(struct mtk_eth *eth) if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | + MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | MTK_RX_BT_32DWORDS, MTK_QDMA_GLO_CFG); @@ -2233,6 +2250,9 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config) val |= config; + if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) + val |= MTK_GDMA_SPECIAL_TAG; + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); } /* Reset and enable PSE */ @@ -2255,12 +2275,17 @@ static int mtk_open(struct net_device *dev) /* we run 2 netdevs on the same dma ring so we only bring it up once */ if (!refcount_read(ð->dma_refcnt)) { - int err = mtk_start_dma(eth); + u32 gdm_config = MTK_GDMA_TO_PDMA; + int err; + err = mtk_start_dma(eth); if (err) return err; - mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); + if (eth->soc->offload_version && 
mtk_ppe_start(ð->ppe) == 0) + gdm_config = MTK_GDMA_TO_PPE; + + mtk_gdm_config(eth, gdm_config); napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); @@ -2321,12 +2346,18 @@ static int mtk_stop(struct net_device *dev) napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); + cancel_work_sync(ð->rx_dim.work); + cancel_work_sync(ð->tx_dim.work); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); mtk_dma_free(eth); + if (eth->soc->offload_version) + mtk_ppe_stop(ð->ppe); + return 0; } @@ -2370,6 +2401,64 @@ err_disable_clks: return ret; } +static void mtk_dim_rx(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); + struct dim_cq_moder cur_profile; + u32 val, cur; + + cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, + dim->profile_ix); + spin_lock_bh(ð->dim_lock); + + val = mtk_r32(eth, MTK_PDMA_DELAY_INT); + val &= MTK_PDMA_DELAY_TX_MASK; + val |= MTK_PDMA_DELAY_RX_EN; + + cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK); + val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT; + + cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); + val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT; + + mtk_w32(eth, val, MTK_PDMA_DELAY_INT); + mtk_w32(eth, val, MTK_QDMA_DELAY_INT); + + spin_unlock_bh(ð->dim_lock); + + dim->state = DIM_START_MEASURE; +} + +static void mtk_dim_tx(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); + struct dim_cq_moder cur_profile; + u32 val, cur; + + cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, + dim->profile_ix); + spin_lock_bh(ð->dim_lock); + + val = mtk_r32(eth, MTK_PDMA_DELAY_INT); + val &= MTK_PDMA_DELAY_RX_MASK; + val |= MTK_PDMA_DELAY_TX_EN; + + cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK); + val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT; + + cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); + val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT; + + mtk_w32(eth, val, MTK_PDMA_DELAY_INT); + mtk_w32(eth, val, MTK_QDMA_DELAY_INT); + + spin_unlock_bh(ð->dim_lock); + + dim->state = DIM_START_MEASURE; +} + static int mtk_hw_init(struct mtk_eth *eth) { int i, val, ret; @@ -2391,9 +2480,6 @@ static int mtk_hw_init(struct mtk_eth *eth) goto err_disable_pm; } - /* enable interrupt delay for RX */ - mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); - /* disable delay and normal interrupt */ mtk_tx_irq_disable(eth, ~0); mtk_rx_irq_disable(eth, ~0); @@ -2432,11 +2518,11 @@ static int mtk_hw_init(struct mtk_eth *eth) /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); - /* enable interrupt delay for RX */ - mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); + /* set interrupt delays based on current Net DIM sample */ + mtk_dim_rx(ð->rx_dim.work); + mtk_dim_tx(ð->tx_dim.work); /* disable delay and normal interrupt */ - mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); mtk_tx_irq_disable(eth, ~0); mtk_rx_irq_disable(eth, ~0); @@ -2473,14 +2559,11 @@ static int __init mtk_init(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - const char *mac_addr; - - mac_addr = of_get_mac_address(mac->of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(dev->dev_addr, mac_addr); + int ret; - /* If the mac address is invalid, use random mac address */ - if (!is_valid_ether_addr(dev->dev_addr)) 
{ + ret = of_get_mac_address(mac->of_node, dev->dev_addr); + if (ret) { + /* If the mac address is invalid, use random mac address */ eth_hw_addr_random(dev); dev_err(eth->dev, "generated random MAC address %pM\n", dev->dev_addr); @@ -2832,6 +2915,7 @@ static const struct net_device_ops mtk_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mtk_poll_controller, #endif + .ndo_setup_tc = mtk_eth_setup_tc, }; static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) @@ -2973,6 +3057,13 @@ static int mtk_probe(struct platform_device *pdev) spin_lock_init(ð->page_lock); spin_lock_init(ð->tx_irq_lock); spin_lock_init(ð->rx_irq_lock); + spin_lock_init(ð->dim_lock); + + eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + INIT_WORK(ð->rx_dim.work, mtk_dim_rx); + + eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + INIT_WORK(ð->tx_dim.work, mtk_dim_tx); if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, @@ -3088,6 +3179,17 @@ static int mtk_probe(struct platform_device *pdev) goto err_free_dev; } + if (eth->soc->offload_version) { + err = mtk_ppe_init(ð->ppe, eth->dev, + eth->base + MTK_ETH_PPE_BASE, 2); + if (err) + goto err_free_dev; + + err = mtk_eth_offload_init(eth); + if (err) + goto err_free_dev; + } + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) continue; @@ -3162,6 +3264,7 @@ static const struct mtk_soc_data mt7621_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, + .offload_version = 2, }; static const struct mtk_soc_data mt7622_data = { @@ -3170,6 +3273,7 @@ static const struct mtk_soc_data mt7622_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, + .offload_version = 2, }; static const struct mtk_soc_data mt7623_data = { @@ -3177,6 +3281,7 @@ static const struct mtk_soc_data mt7623_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, + .offload_version = 2, }; static const struct mtk_soc_data mt7629_data = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index fd3cec8f06ba..11331b44ba07 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -15,12 +15,15 @@ #include <linux/u64_stats_sync.h> #include <linux/refcount.h> #include <linux/phylink.h> +#include <linux/rhashtable.h> +#include <linux/dim.h> +#include "mtk_ppe.h" #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 #define MTK_MAX_RX_LENGTH_2K 2048 #define MTK_TX_DMA_BUF_LEN 0x3fff -#define MTK_DMA_SIZE 256 +#define MTK_DMA_SIZE 512 #define MTK_NAPI_WEIGHT 64 #define MTK_MAC_COUNT 2 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) @@ -40,7 +43,8 @@ NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_SG | NETIF_F_TSO | \ NETIF_F_TSO6 | \ - NETIF_F_IPV6_CSUM) + NETIF_F_IPV6_CSUM |\ + NETIF_F_HW_TC) #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) @@ -82,10 +86,12 @@ /* GDM Exgress Control Register */ #define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_SPECIAL_TAG BIT(24) #define MTK_GDMA_ICS_EN BIT(22) #define MTK_GDMA_TCS_EN BIT(21) #define MTK_GDMA_UCS_EN BIT(20) #define MTK_GDMA_TO_PDMA 0x0 +#define MTK_GDMA_TO_PPE 0x4444 #define MTK_GDMA_DROP_ALL 0x7777 /* Unicast Filter MAC Address Register - Low */ @@ -132,13 +138,18 @@ /* PDMA Delay Interrupt Register */ #define 
MTK_PDMA_DELAY_INT 0xa0c +#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0) #define MTK_PDMA_DELAY_RX_EN BIT(15) -#define MTK_PDMA_DELAY_RX_PINT 4 #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8 -#define MTK_PDMA_DELAY_RX_PTIME 4 -#define MTK_PDMA_DELAY_RX_DELAY \ - (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \ - (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT)) +#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0 + +#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16) +#define MTK_PDMA_DELAY_TX_EN BIT(31) +#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24 +#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16 + +#define MTK_PDMA_DELAY_PINT_MASK 0x7f +#define MTK_PDMA_DELAY_PTIME_MASK 0xff /* PDMA Interrupt Status Register */ #define MTK_PDMA_INT_STATUS 0xa20 @@ -198,12 +209,12 @@ #define MTK_RX_BT_32DWORDS (3 << 11) #define MTK_NDP_CO_PRO BIT(10) #define MTK_TX_WB_DDONE BIT(6) -#define MTK_DMA_SIZE_16DWORDS (2 << 4) +#define MTK_TX_BT_32DWORDS (3 << 4) #define MTK_RX_DMA_BUSY BIT(3) #define MTK_TX_DMA_BUSY BIT(1) #define MTK_RX_DMA_EN BIT(2) #define MTK_TX_DMA_EN BIT(0) -#define MTK_DMA_BUSY_TIMEOUT HZ +#define MTK_DMA_BUSY_TIMEOUT_US 1000000 /* QDMA Reset Index Register */ #define MTK_QDMA_RST_IDX 0x1A08 @@ -220,6 +231,7 @@ /* QDMA Interrupt Status Register */ #define MTK_QDMA_INT_STATUS 0x1A18 #define MTK_RX_DONE_DLY BIT(30) +#define MTK_TX_DONE_DLY BIT(28) #define MTK_RX_DONE_INT3 BIT(19) #define MTK_RX_DONE_INT2 BIT(18) #define MTK_RX_DONE_INT1 BIT(17) @@ -229,8 +241,7 @@ #define MTK_TX_DONE_INT1 BIT(1) #define MTK_TX_DONE_INT0 BIT(0) #define MTK_RX_DONE_INT MTK_RX_DONE_DLY -#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ - MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) +#define MTK_TX_DONE_INT MTK_TX_DONE_DLY /* QDMA Interrupt grouping registers */ #define MTK_QDMA_INT_GRP1 0x1a20 @@ -296,15 +307,23 @@ #define RX_DMA_LSO BIT(30) #define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) #define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) +#define RX_DMA_VTAG BIT(15) /* QDMA descriptor rxd3 */ #define RX_DMA_VID(_x) ((_x) & 0xfff) /* QDMA descriptor rxd4 */ +#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) +#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14) +#define MTK_RXD4_SRC_PORT GENMASK(21, 19) +#define MTK_RXD4_ALG GENMASK(31, 22) + +/* QDMA descriptor rxd4 */ #define RX_DMA_L4_VALID BIT(24) #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ #define RX_DMA_FPORT_SHIFT 19 #define RX_DMA_FPORT_MASK 0x7 +#define RX_DMA_SPECIAL_TAG BIT(22) /* PHY Indirect Access Control registers */ #define MTK_PHY_IAC 0x10004 @@ -623,6 +642,7 @@ struct mtk_tx_buf { * @phys: The physical addr of tx_buf * @next_free: Pointer to the next free descriptor * @last_free: Pointer to the last free descriptor + * @last_free_ptr: Hardware pointer value of the last free descriptor * @thresh: The threshold of minimum amount of free descriptors * @free_count: QDMA uses a linked list. 
Track how many free descriptors * are present @@ -633,6 +653,7 @@ struct mtk_tx_ring { dma_addr_t phys; struct mtk_tx_dma *next_free; struct mtk_tx_dma *last_free; + u32 last_free_ptr; u16 thresh; atomic_t free_count; int dma_size; @@ -802,6 +823,7 @@ struct mtk_soc_data { u32 caps; u32 required_clks; bool required_pctl; + u8 offload_version; netdev_features_t hw_features; }; @@ -835,6 +857,7 @@ struct mtk_sgmii { * @page_lock: Make sure that register operations are atomic * @tx_irq__lock: Make sure that IRQ register operations are atomic * @rx_irq__lock: Make sure that IRQ register operations are atomic + * @dim_lock: Make sure that Net DIM operations are atomic * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a * dummy for NAPI to work * @netdev: The netdev instances @@ -853,6 +876,14 @@ struct mtk_sgmii { * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring * @tx_napi: The TX NAPI struct * @rx_napi: The RX NAPI struct + * @rx_events: Net DIM RX event counter + * @rx_packets: Net DIM RX packet counter + * @rx_bytes: Net DIM RX byte counter + * @rx_dim: Net DIM RX context + * @tx_events: Net DIM TX event counter + * @tx_packets: Net DIM TX packet counter + * @tx_bytes: Net DIM TX byte counter + * @tx_dim: Net DIM TX context * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring * @phy_scratch_ring: physical address of scratch_ring * @scratch_head: The scratch memory that scratch_ring points to. @@ -897,10 +928,25 @@ struct mtk_eth { const struct mtk_soc_data *soc; + spinlock_t dim_lock; + + u32 rx_events; + u32 rx_packets; + u32 rx_bytes; + struct dim rx_dim; + + u32 tx_events; + u32 tx_packets; + u32 tx_bytes; + struct dim tx_dim; + u32 tx_int_mask_reg; u32 tx_int_status_reg; u32 rx_dma_l4_valid; int ip_align; + + struct mtk_ppe ppe; + struct rhashtable flow_table; }; /* struct mtk_mac - the structure that holds the info about the MACs of the @@ -945,4 +991,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_eth_offload_init(struct mtk_eth *eth); +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); + + #endif /* MTK_ETH_H */ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c new file mode 100644 index 000000000000..3ad10c793308 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include "mtk_ppe.h" +#include "mtk_ppe_regs.h" + +static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + writel(val, ppe->base + reg); +} + +static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg) +{ + return readl(ppe->base + reg); +} + +static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set) +{ + u32 val; + + val = ppe_r32(ppe, reg); + val &= ~mask; + val |= set; + ppe_w32(ppe, reg, val); + + return val; +} + +static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, 0, val); +} + +static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, val, 0); +} + +static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) +{ + int ret; + u32 val; + + ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, 
val, + !(val & MTK_PPE_GLO_CFG_BUSY), + 20, MTK_PPE_WAIT_TIMEOUT_US); + + if (ret) + dev_err(ppe->dev, "PPE table busy"); + + return ret; +} + +static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) +{ + ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); + ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); +} + +static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable) +{ + mtk_ppe_cache_clear(ppe); + + ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN, + enable * MTK_PPE_CACHE_CTL_EN); +} + +static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) +{ + u32 hv1, hv2, hv3; + u32 hash; + + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { + case MTK_PPE_PKT_TYPE_BRIDGE: + hv1 = e->bridge.src_mac_lo; + hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); + hv2 = e->bridge.src_mac_hi >> 16; + hv2 ^= e->bridge.dest_mac_lo; + hv3 = e->bridge.dest_mac_hi; + break; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = e->ipv4.orig.ports; + hv2 = e->ipv4.orig.dest_ip; + hv3 = e->ipv4.orig.src_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3]; + hv1 ^= e->ipv6.ports; + + hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2]; + hv2 ^= e->ipv6.dest_ip[0]; + + hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1]; + hv3 ^= e->ipv6.src_ip[0]; + break; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + default: + WARN_ON_ONCE(1); + return MTK_PPE_HASH_MASK; + } + + hash = (hv1 & hv2) | ((~hv1) & hv3); + hash = (hash >> 24) | ((hash & 0xffffff) << 8); + hash ^= hv1 ^ hv2 ^ hv3; + hash ^= hash >> 16; + hash <<= 1; + hash &= MTK_PPE_ENTRIES - 1; + + return hash; +} + +static inline struct mtk_foe_mac_info * +mtk_foe_entry_l2(struct mtk_foe_entry *entry) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.l2; + + return &entry->ipv4.l2; +} + +static inline u32 * +mtk_foe_entry_ib2(struct mtk_foe_entry *entry) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.ib2; + + return &entry->ipv4.ib2; +} + +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, + u8 pse_port, u8 *src_mac, u8 *dest_mac) +{ + struct mtk_foe_mac_info *l2; + u32 ports_pad, val; + + memset(entry, 0, sizeof(*entry)); + + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | + MTK_FOE_IB1_BIND_TTL | + MTK_FOE_IB1_BIND_CACHE; + entry->ib1 = val; + + val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | + FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) | + FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port); + + if (is_multicast_ether_addr(dest_mac)) + val |= MTK_FOE_IB2_MULTICAST; + + ports_pad = 0xa5a5a500 | (l4proto & 0xff); + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + entry->ipv4.orig.ports = ports_pad; + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + entry->ipv6.ports = ports_pad; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + entry->ipv6.ib2 = val; + l2 = &entry->ipv6.l2; + } else { + entry->ipv4.ib2 = val; + l2 = &entry->ipv4.l2; + } + + l2->dest_mac_hi = get_unaligned_be32(dest_mac); + l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4); + l2->src_mac_hi = get_unaligned_be32(src_mac); + l2->src_mac_lo = get_unaligned_be16(src_mac + 4); + + if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + l2->etype = ETH_P_IPV6; + else + l2->etype = 
ETH_P_IP; + + return 0; +} + +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port) +{ + u32 *ib2 = mtk_foe_entry_ib2(entry); + u32 val; + + val = *ib2; + val &= ~MTK_FOE_IB2_DEST_PORT; + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); + *ib2 = val; + + return 0; +} + +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + struct mtk_ipv4_tuple *t; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + if (egress) { + t = &entry->ipv4.new; + break; + } + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + t = &entry->ipv4.orig; + break; + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr); + entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr); + return 0; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + t->src_ip = be32_to_cpu(src_addr); + t->dest_ip = be32_to_cpu(dest_addr); + + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + return 0; + + t->src_port = be16_to_cpu(src_port); + t->dest_port = be16_to_cpu(dest_port); + + return 0; +} + +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + u32 *src, *dest; + int i; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + src = entry->dslite.tunnel_src_ip; + dest = entry->dslite.tunnel_dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6.src_port = be16_to_cpu(src_port); + entry->ipv6.dest_port = be16_to_cpu(dest_port); + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + src = entry->ipv6.src_ip; + dest = entry->ipv6.dest_ip; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + for (i = 0; i < 4; i++) + src[i] = be32_to_cpu(src_addr[i]); + for (i = 0; i < 4; i++) + dest[i] = be32_to_cpu(dest_addr[i]); + + return 0; +} + +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + + l2->etype = BIT(port); + + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER)) + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); + else + l2->etype |= BIT(8); + + entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG; + + return 0; +} + +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + + switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) { + case 0: + entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG | + FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); + l2->vlan1 = vid; + return 0; + case 1: + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) { + l2->vlan1 = vid; + l2->etype |= BIT(8); + } else { + l2->vlan2 = vid; + entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); + } + return 0; + default: + return -ENOSPC; + } +} + +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) || + (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) + l2->etype = ETH_P_PPP_SES; + + entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE; + l2->pppoe_id = sid; + + return 0; +} + +static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) +{ + return !(entry->ib1 & MTK_FOE_IB1_STATIC) && + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; +} + +int 
mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp) +{ + struct mtk_foe_entry *hwe; + u32 hash; + + timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); + + hash = mtk_ppe_hash_entry(entry); + hwe = &ppe->foe_table[hash]; + if (!mtk_foe_entry_usable(hwe)) { + hwe++; + hash++; + + if (!mtk_foe_entry_usable(hwe)) + return -ENOSPC; + } + + memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); + wmb(); + hwe->ib1 = entry->ib1; + + dma_wmb(); + + mtk_ppe_cache_clear(ppe); + + return hash; +} + +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, + int version) +{ + struct mtk_foe_entry *foe; + + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. + */ + ppe->base = base; + ppe->dev = dev; + ppe->version = version; + + foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), + &ppe->foe_phys, GFP_KERNEL); + if (!foe) + return -ENOMEM; + + ppe->foe_table = foe; + + mtk_ppe_debugfs_init(ppe); + + return 0; +} + +static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) +{ + static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; + int i, k; + + memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table)); + + if (!IS_ENABLED(CONFIG_SOC_MT7621)) + return; + + /* skip all entries that cross the 1024 byte boundary */ + for (i = 0; i < MTK_PPE_ENTRIES; i += 128) + for (k = 0; k < ARRAY_SIZE(skip); k++) + ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC; +} + +int mtk_ppe_start(struct mtk_ppe *ppe) +{ + u32 val; + + mtk_ppe_init_foe_table(ppe); + ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); + + val = MTK_PPE_TB_CFG_ENTRY_80B | + MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN | + FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) | + FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE, + MTK_PPE_KEEPALIVE_DISABLE) | + FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | + FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | + FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, + MTK_PPE_ENTRIES_SHIFT); + ppe_w32(ppe, MTK_PPE_TB_CFG, val); + + ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, + MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6); + + mtk_ppe_cache_enable(ppe, true); + + val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | + MTK_PPE_FLOW_CFG_IP4_UDP_FRAG | + MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_5T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_6RD | + MTK_PPE_FLOW_CFG_IP4_NAT | + MTK_PPE_FLOW_CFG_IP4_NAPT | + MTK_PPE_FLOW_CFG_IP4_DSLITE | + MTK_PPE_FLOW_CFG_L2_BRIDGE | + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); + + val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) | + FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3); + ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) | + FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_AGE0, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) | + FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7); + ppe_w32(ppe, MTK_PPE_BIND_AGE1, val); + + val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF; + ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val); + + val = MTK_PPE_BIND_LIMIT1_FULL | + FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val); + + val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) | + FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1); + 
ppe_w32(ppe, MTK_PPE_BIND_RATE, val); + + /* enable PPE */ + val = MTK_PPE_GLO_CFG_EN | + MTK_PPE_GLO_CFG_IP4_L4_CS_DROP | + MTK_PPE_GLO_CFG_IP4_CS_DROP | + MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE; + ppe_w32(ppe, MTK_PPE_GLO_CFG, val); + + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); + + return 0; +} + +int mtk_ppe_stop(struct mtk_ppe *ppe) +{ + u32 val; + int i; + + for (i = 0; i < MTK_PPE_ENTRIES; i++) + ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_INVALID); + + mtk_ppe_cache_enable(ppe, false); + + /* disable offload engine */ + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); + + /* disable aging */ + val = MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN; + ppe_clear(ppe, MTK_PPE_TB_CFG, val); + + return mtk_ppe_wait_busy(ppe); +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h new file mode 100644 index 000000000000..242fb8f2ae65 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_PPE_H +#define __MTK_PPE_H + +#include <linux/kernel.h> +#include <linux/bitfield.h> + +#define MTK_ETH_PPE_BASE 0xc00 + +#define MTK_PPE_ENTRIES_SHIFT 3 +#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) +#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) +#define MTK_PPE_WAIT_TIMEOUT_US 1000000 + +#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) +#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) +#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24) + +#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0) +#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15) +#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16) +#define MTK_FOE_IB1_BIND_PPPOE BIT(19) +#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20) +#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21) +#define MTK_FOE_IB1_BIND_CACHE BIT(22) +#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23) +#define MTK_FOE_IB1_BIND_TTL BIT(24) + +#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25) +#define MTK_FOE_IB1_STATE GENMASK(29, 28) +#define MTK_FOE_IB1_UDP BIT(30) +#define MTK_FOE_IB1_STATIC BIT(31) + +enum { + MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0, + MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1, + MTK_PPE_PKT_TYPE_BRIDGE = 2, + MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3, + MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4, + MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5, + MTK_PPE_PKT_TYPE_IPV6_6RD = 7, +}; + +#define MTK_FOE_IB2_QID GENMASK(3, 0) +#define MTK_FOE_IB2_PSE_QOS BIT(4) +#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) +#define MTK_FOE_IB2_MULTICAST BIT(8) + +#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) +#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) +#define MTK_FOE_IB2_WHNAT_NAT BIT(17) + +#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) + +#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18) + +#define MTK_FOE_IB2_DSCP GENMASK(31, 24) + +#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) +#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) +#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) + +enum { + MTK_FOE_STATE_INVALID, + MTK_FOE_STATE_UNBIND, + MTK_FOE_STATE_BIND, + MTK_FOE_STATE_FIN +}; + +struct mtk_foe_mac_info { + u16 vlan1; + u16 etype; + + u32 dest_mac_hi; + + u16 vlan2; + u16 dest_mac_lo; + + u32 src_mac_hi; + + u16 pppoe_id; + u16 src_mac_lo; +}; + +struct mtk_foe_bridge { + u32 dest_mac_hi; + + u16 src_mac_lo; + u16 dest_mac_lo; + + u32 src_mac_hi; + + u32 ib2; + + u32 _rsv[5]; + + u32 udf_tsid; + struct mtk_foe_mac_info l2; +}; + 
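All of the ib1/ib2 words described above are packed and unpacked with the <linux/bitfield.h> helpers against these GENMASK() definitions; mtk_foe_entry_prepare(), mtk_foe_entry_commit() and the debugfs dump rely on that throughout. A small illustrative sketch of the idiom (example_pack_ib1/example_decode_ib1 are not driver functions):

#include <linux/kernel.h>
#include <linux/bitfield.h>

/* Build ib1 for a bound IPv4 HNAPT entry, mirroring what
 * mtk_foe_entry_prepare() and mtk_foe_entry_commit() do above.
 */
static u32 example_pack_ib1(u16 timestamp)
{
	return FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	       FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, MTK_PPE_PKT_TYPE_IPV4_HNAPT) |
	       FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
			  timestamp & MTK_FOE_IB1_BIND_TIMESTAMP) |
	       MTK_FOE_IB1_BIND_TTL | MTK_FOE_IB1_BIND_CACHE;
}

/* Decode the same fields, as the entry state checks and debugfs code do */
static void example_decode_ib1(u32 ib1)
{
	unsigned int state = FIELD_GET(MTK_FOE_IB1_STATE, ib1);
	unsigned int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, ib1);
	unsigned int ts = FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);

	pr_debug("foe: state=%u type=%u timestamp=%u\n", state, type, ts);
}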
+struct mtk_ipv4_tuple { + u32 src_ip; + u32 dest_ip; + union { + struct { + u16 dest_port; + u16 src_port; + }; + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; + u32 ports; + }; +}; + +struct mtk_foe_ipv4 { + struct mtk_ipv4_tuple orig; + + u32 ib2; + + struct mtk_ipv4_tuple new; + + u16 timestamp; + u16 _rsv0[3]; + + u32 udf_tsid; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv4_dslite { + struct mtk_ipv4_tuple ip4; + + u32 tunnel_src_ip[4]; + u32 tunnel_dest_ip[4]; + + u8 flow_label[3]; + u8 priority; + + u32 udf_tsid; + + u32 ib2; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv6 { + u32 src_ip[4]; + u32 dest_ip[4]; + + union { + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; /* 3-tuple */ + struct { + u16 dest_port; + u16 src_port; + }; /* 5-tuple */ + u32 ports; + }; + + u32 _rsv[3]; + + u32 udf; + + u32 ib2; + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv6_6rd { + u32 src_ip[4]; + u32 dest_ip[4]; + u16 dest_port; + u16 src_port; + + u32 tunnel_src_ip; + u32 tunnel_dest_ip; + + u16 hdr_csum; + u8 dscp; + u8 ttl; + + u8 flag; + u8 pad; + u8 per_flow_6rd_id; + u8 pad2; + + u32 ib2; + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_entry { + u32 ib1; + + union { + struct mtk_foe_bridge bridge; + struct mtk_foe_ipv4 ipv4; + struct mtk_foe_ipv4_dslite dslite; + struct mtk_foe_ipv6 ipv6; + struct mtk_foe_ipv6_6rd ipv6_6rd; + u32 data[19]; + }; +}; + +enum { + MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02, + MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03, + MTK_PPE_CPU_REASON_NO_FLOW = 0x07, + MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08, + MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09, + MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a, + MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b, + MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c, + MTK_PPE_CPU_REASON_UN_HIT = 0x0d, + MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e, + MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f, + MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10, + MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11, + MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12, + MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13, + MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14, + MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15, + MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16, + MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17, + MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18, + MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19, + MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a, + MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b, + MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c, + MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e, + MTK_PPE_CPU_REASON_INVALID = 0x1f, +}; + +struct mtk_ppe { + struct device *dev; + void __iomem *base; + int version; + + struct mtk_foe_entry *foe_table; + dma_addr_t foe_phys; + + void *acct_table; +}; + +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, + int version); +int mtk_ppe_start(struct mtk_ppe *ppe); +int mtk_ppe_stop(struct mtk_ppe *ppe); + +static inline void +mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) +{ + ppe->foe_table[hash].ib1 = 0; + dma_wmb(); +} + +static inline int +mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash) +{ + u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1); + + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) + return -1; + + return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1); +} + +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, + u8 pse_port, u8 *src_mac, u8 *dest_mac); +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port); 
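Taken together, the entry helpers declared here form a small builder API: the flow offload code in mtk_ppe_offload.c (added further below) prepares an entry, fills in the L3/L4 tuples and L2 rewrite info, and then commits it into the FOE hash table. A hypothetical usage sketch, assuming an IPv4 TCP flow and a hard-coded PSE destination port (example_offload_ipv4_flow is illustrative, not part of the driver, and all error handling beyond the bare minimum is omitted):

#include <linux/in.h>

static int example_offload_ipv4_flow(struct mtk_ppe *ppe,
				     u8 *src_mac, u8 *dest_mac,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, __be16 dport,
				     u16 timestamp)
{
	struct mtk_foe_entry e;
	int err;

	err = mtk_foe_entry_prepare(&e, MTK_PPE_PKT_TYPE_IPV4_HNAPT,
				    IPPROTO_TCP, 1 /* assumed PSE port */,
				    src_mac, dest_mac);
	if (err)
		return err;

	/* second argument selects orig (false) vs. new (true) tuple,
	 * per the mtk_ppe.c implementation above
	 */
	mtk_foe_entry_set_ipv4_tuple(&e, false, saddr, sport, daddr, dport);
	mtk_foe_entry_set_ipv4_tuple(&e, true, saddr, sport, daddr, dport);

	/* returns the hash index on success, a negative errno otherwise */
	return mtk_foe_entry_commit(ppe, &e, timestamp);
}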
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port); +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port); +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp); +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c new file mode 100644 index 000000000000..98b1d3577bcd --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include "mtk_eth_soc.h" + +struct mtk_flow_addr_info +{ + void *src, *dest; + u16 *src_port, *dest_port; + bool ipv6; +}; + +static const char *mtk_foe_entry_state_str(int state) +{ + static const char * const state_str[] = { + [MTK_FOE_STATE_INVALID] = "INV", + [MTK_FOE_STATE_UNBIND] = "UNB", + [MTK_FOE_STATE_BIND] = "BND", + [MTK_FOE_STATE_FIN] = "FIN", + }; + + if (state >= ARRAY_SIZE(state_str) || !state_str[state]) + return "UNK"; + + return state_str[state]; +} + +static const char *mtk_foe_pkt_type_str(int type) +{ + static const char * const type_str[] = { + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", + [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", + [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD", + }; + + if (type >= ARRAY_SIZE(type_str) || !type_str[type]) + return "UNKNOWN"; + + return type_str[type]; +} + +static void +mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6) +{ + u32 n_addr[4]; + int i; + + if (!ipv6) { + seq_printf(m, "%pI4h", addr); + return; + } + + for (i = 0; i < ARRAY_SIZE(n_addr); i++) + n_addr[i] = htonl(addr[i]); + seq_printf(m, "%pI6", n_addr); +} + +static void +mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai) +{ + mtk_print_addr(m, ai->src, ai->ipv6); + if (ai->src_port) + seq_printf(m, ":%d", *ai->src_port); + seq_printf(m, "->"); + mtk_print_addr(m, ai->dest, ai->ipv6); + if (ai->dest_port) + seq_printf(m, ":%d", *ai->dest_port); +} + +static int +mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind) +{ + struct mtk_ppe *ppe = m->private; + int i; + + for (i = 0; i < MTK_PPE_ENTRIES; i++) { + struct mtk_foe_entry *entry = &ppe->foe_table[i]; + struct mtk_foe_mac_info *l2; + struct mtk_flow_addr_info ai = {}; + unsigned char h_source[ETH_ALEN]; + unsigned char h_dest[ETH_ALEN]; + int type, state; + u32 ib2; + + + state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1); + if (!state) + continue; + + if (bind && state != MTK_FOE_STATE_BIND) + continue; + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + seq_printf(m, "%05x %s %7s", i, + mtk_foe_entry_state_str(state), + mtk_foe_pkt_type_str(type)); + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + ai.src_port = &entry->ipv4.orig.src_port; + ai.dest_port = &entry->ipv4.orig.dest_port; + fallthrough; + case 
MTK_PPE_PKT_TYPE_IPV4_ROUTE: + ai.src = &entry->ipv4.orig.src_ip; + ai.dest = &entry->ipv4.orig.dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + ai.src_port = &entry->ipv6.src_port; + ai.dest_port = &entry->ipv6.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + ai.src = &entry->ipv6.src_ip; + ai.dest = &entry->ipv6.dest_ip; + ai.ipv6 = true; + break; + } + + seq_printf(m, " orig="); + mtk_print_addr_info(m, &ai); + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + ai.src_port = &entry->ipv4.new.src_port; + ai.dest_port = &entry->ipv4.new.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + ai.src = &entry->ipv4.new.src_ip; + ai.dest = &entry->ipv4.new.dest_ip; + seq_printf(m, " new="); + mtk_print_addr_info(m, &ai); + break; + } + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + l2 = &entry->ipv6.l2; + ib2 = entry->ipv6.ib2; + } else { + l2 = &entry->ipv4.l2; + ib2 = entry->ipv4.ib2; + } + + *((__be32 *)h_source) = htonl(l2->src_mac_hi); + *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo); + *((__be32 *)h_dest) = htonl(l2->dest_mac_hi); + *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo); + + seq_printf(m, " eth=%pM->%pM etype=%04x" + " vlan=%d,%d ib1=%08x ib2=%08x\n", + h_source, h_dest, ntohs(l2->etype), + l2->vlan1, l2->vlan2, entry->ib1, ib2); + } + + return 0; +} + +static int +mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private) +{ + return mtk_ppe_debugfs_foe_show(m, private, false); +} + +static int +mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private) +{ + return mtk_ppe_debugfs_foe_show(m, private, true); +} + +static int +mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_ppe_debugfs_foe_show_all, + inode->i_private); +} + +static int +mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_ppe_debugfs_foe_show_bind, + inode->i_private); +} + +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe) +{ + static const struct file_operations fops_all = { + .open = mtk_ppe_debugfs_foe_open_all, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + }; + + static const struct file_operations fops_bind = { + .open = mtk_ppe_debugfs_foe_open_bind, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + }; + + struct dentry *root; + + root = debugfs_create_dir("mtk_ppe", NULL); + if (!root) + return -ENOMEM; + + debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all); + debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind); + + return 0; +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c new file mode 100644 index 000000000000..b5f68f66d42a --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> + */ + +#include <linux/if_ether.h> +#include <linux/rhashtable.h> +#include <linux/ip.h> +#include <net/flow_offload.h> +#include <net/pkt_cls.h> +#include <net/dsa.h> +#include "mtk_eth_soc.h" + +struct mtk_flow_data { + struct ethhdr eth; + + union { + struct { + __be32 src_addr; + __be32 dst_addr; + } v4; + }; + + __be16 src_port; + __be16 dst_port; + + struct { + u16 id; + __be16 proto; + u8 num; + } vlan; + struct { + u16 sid; + u8 num; + } pppoe; +}; + +struct mtk_flow_entry { + struct rhash_head node; + unsigned long cookie; + u16 
hash; +}; + +static const struct rhashtable_params mtk_flow_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, node), + .key_offset = offsetof(struct mtk_flow_entry, cookie), + .key_len = sizeof(unsigned long), + .automatic_shrinking = true, +}; + +static u32 +mtk_eth_timestamp(struct mtk_eth *eth) +{ + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; +} + +static int +mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, + bool egress) +{ + return mtk_foe_entry_set_ipv4_tuple(foe, egress, + data->v4.src_addr, data->src_port, + data->v4.dst_addr, data->dst_port); +} + +static void +mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) +{ + void *dest = eth + act->mangle.offset; + const void *src = &act->mangle.val; + + if (act->mangle.offset > 8) + return; + + if (act->mangle.mask == 0xffff) { + src += 2; + dest += 2; + } + + memcpy(dest, src, act->mangle.mask ? 2 : 4); +} + + +static int +mtk_flow_mangle_ports(const struct flow_action_entry *act, + struct mtk_flow_data *data) +{ + u32 val = ntohl(act->mangle.val); + + switch (act->mangle.offset) { + case 0: + if (act->mangle.mask == ~htonl(0xffff)) + data->dst_port = cpu_to_be16(val); + else + data->src_port = cpu_to_be16(val >> 16); + break; + case 2: + data->dst_port = cpu_to_be16(val); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +mtk_flow_mangle_ipv4(const struct flow_action_entry *act, + struct mtk_flow_data *data) +{ + __be32 *dest; + + switch (act->mangle.offset) { + case offsetof(struct iphdr, saddr): + dest = &data->v4.src_addr; + break; + case offsetof(struct iphdr, daddr): + dest = &data->v4.dst_addr; + break; + default: + return -EINVAL; + } + + memcpy(dest, &act->mangle.val, sizeof(u32)); + + return 0; +} + +static int +mtk_flow_get_dsa_port(struct net_device **dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_port *dp; + + dp = dsa_port_from_netdev(*dev); + if (IS_ERR(dp)) + return -ENODEV; + + if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK) + return -ENODEV; + + *dev = dp->cpu_dp->master; + + return dp->index; +#else + return -ENODEV; +#endif +} + +static int +mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, + struct net_device *dev) +{ + int pse_port, dsa_port; + + dsa_port = mtk_flow_get_dsa_port(&dev); + if (dsa_port >= 0) + mtk_foe_entry_set_dsa(foe, dsa_port); + + if (dev == eth->netdev[0]) + pse_port = 1; + else if (dev == eth->netdev[1]) + pse_port = 2; + else + return -EOPNOTSUPP; + + mtk_foe_entry_set_pse_port(foe, pse_port); + + return 0; +} + +static int +mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_action_entry *act; + struct mtk_flow_data data = {}; + struct mtk_foe_entry foe; + struct net_device *odev = NULL; + struct mtk_flow_entry *entry; + int offload_type = 0; + u16 addr_type = 0; + u32 timestamp; + u8 l4proto = 0; + int err = 0; + int hash; + int i; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + l4proto = 
match.key->ip_proto; + } else { + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_MANGLE: + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) + mtk_flow_offload_mangle_eth(act, &data.eth); + break; + case FLOW_ACTION_REDIRECT: + odev = act->dev; + break; + case FLOW_ACTION_CSUM: + break; + case FLOW_ACTION_VLAN_PUSH: + if (data.vlan.num == 1 || + act->vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan.id = act->vlan.vid; + data.vlan.proto = act->vlan.proto; + data.vlan.num++; + break; + case FLOW_ACTION_VLAN_POP: + break; + case FLOW_ACTION_PPPOE_PUSH: + if (data.pppoe.num == 1) + return -EOPNOTSUPP; + + data.pppoe.sid = act->pppoe.sid; + data.pppoe.num++; + break; + default: + return -EOPNOTSUPP; + } + } + + switch (addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; + default: + return -EOPNOTSUPP; + } + + if (!is_valid_ether_addr(data.eth.h_source) || + !is_valid_ether_addr(data.eth.h_dest)) + return -EINVAL; + + err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0, + data.eth.h_source, + data.eth.h_dest); + if (err) + return err; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports ports; + + flow_rule_match_ports(rule, &ports); + data.src_port = ports.key->src; + data.dst_port = ports.key->dst; + } else { + return -EOPNOTSUPP; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs addrs; + + flow_rule_match_ipv4_addrs(rule, &addrs); + + data.v4.src_addr = addrs.key->src; + data.v4.dst_addr = addrs.key->dst; + + mtk_flow_set_ipv4_addr(&foe, &data, false); + } + + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; + + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + err = mtk_flow_mangle_ports(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + err = mtk_flow_mangle_ipv4(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + /* handled earlier */ + break; + default: + return -EOPNOTSUPP; + } + + if (err) + return err; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + err = mtk_flow_set_ipv4_addr(&foe, &data, true); + if (err) + return err; + } + + if (data.vlan.num == 1) { + if (data.vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + mtk_foe_entry_set_vlan(&foe, data.vlan.id); + } + if (data.pppoe.num == 1) + mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); + + err = mtk_flow_set_output_device(eth, &foe, odev); + if (err) + return err; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->cookie = f->cookie; + timestamp = mtk_eth_timestamp(eth); + hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); + if (hash < 0) { + err = hash; + goto free; + } + + entry->hash = hash; + err = rhashtable_insert_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) + goto clear_flow; + + return 0; +clear_flow: + mtk_foe_entry_clear(ð->ppe, hash); +free: + kfree(entry); + return err; +} + +static int +mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct mtk_flow_entry *entry; + + entry = rhashtable_lookup(ð->flow_table, &f->cookie, + mtk_flow_ht_params); + if (!entry) + return -ENOENT; + + mtk_foe_entry_clear(ð->ppe, entry->hash); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + kfree(entry); + + return 0; +} + +static int 
+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct mtk_flow_entry *entry; + int timestamp; + u32 idle; + + entry = rhashtable_lookup(ð->flow_table, &f->cookie, + mtk_flow_ht_params); + if (!entry) + return -ENOENT; + + timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); + if (timestamp < 0) + return -ETIMEDOUT; + + idle = mtk_eth_timestamp(eth) - timestamp; + f->stats.lastused = jiffies - idle * HZ; + + return 0; +} + +static DEFINE_MUTEX(mtk_flow_offload_mutex); + +static int +mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct flow_cls_offload *cls = type_data; + struct net_device *dev = cb_priv; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int err; + + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + + if (type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + + mutex_lock(&mtk_flow_offload_mutex); + switch (cls->command) { + case FLOW_CLS_REPLACE: + err = mtk_flow_offload_replace(eth, cls); + break; + case FLOW_CLS_DESTROY: + err = mtk_flow_offload_destroy(eth, cls); + break; + case FLOW_CLS_STATS: + err = mtk_flow_offload_stats(eth, cls); + break; + default: + err = -EOPNOTSUPP; + break; + } + mutex_unlock(&mtk_flow_offload_mutex); + + return err; +} + +static int +mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + static LIST_HEAD(block_cb_list); + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + + if (!eth->ppe.foe_table) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + cb = mtk_eth_setup_tc_block_cb; + f->driver_block_list = &block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (block_cb) { + flow_block_cb_incref(block_cb); + return 0; + } + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &block_cb_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (!block_cb) + return -ENOENT; + + if (flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } + return 0; + default: + return -EOPNOTSUPP; + } +} + +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + if (type == TC_SETUP_FT) + return mtk_eth_setup_tc_block(dev, type_data); + + return -EOPNOTSUPP; +} + +int mtk_eth_offload_init(struct mtk_eth *eth) +{ + if (!eth->ppe.foe_table) + return 0; + + return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h new file mode 100644 index 000000000000..0c45ea0900f1 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_PPE_REGS_H +#define __MTK_PPE_REGS_H + +#define MTK_PPE_GLO_CFG 0x200 +#define MTK_PPE_GLO_CFG_EN BIT(0) +#define MTK_PPE_GLO_CFG_TSID_EN BIT(1) +#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2) +#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3) +#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4) +#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5) +#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6) +#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7) 
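[Editor's illustrative sketch, not part of the patch: how the register offsets and field bits defined in this header are typically consumed. The real driver wraps register access in its own helper functions; plain readl()/writel() on the ppe->base mapping from mtk_ppe.h are used here only to keep the example self-contained, and the chosen bits are arbitrary.]

static void example_ppe_glo_cfg_enable(struct mtk_ppe *ppe)
{
	u32 val = readl(ppe->base + MTK_PPE_GLO_CFG);

	val |= MTK_PPE_GLO_CFG_EN;		/* turn the packet engine on */
	val |= MTK_PPE_GLO_CFG_IP4_CS_DROP;	/* drop bad IPv4 checksums */
	val |= MTK_PPE_GLO_CFG_TTL0_DROP;	/* drop packets with TTL == 0 */

	writel(val, ppe->base + MTK_PPE_GLO_CFG);
}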
+#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8) +#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9) +#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10) +#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11) +#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GENMASK(13, 12) +#define MTK_PPE_GLO_CFG_BUSY BIT(31) + +#define MTK_PPE_FLOW_CFG 0x204 +#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6) +#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7) +#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8) +#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9) +#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10) +#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12) +#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13) +#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14) +#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15) +#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16) +#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17) +#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18) +#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19) +#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20) + +#define MTK_PPE_IP_PROTO_CHK 0x208 +#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0) +#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16) + +#define MTK_PPE_TB_CFG 0x21c +#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0) +#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3) +#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4) +#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6) +#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7) +#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8) +#define MTK_PPE_TB_CFG_AGE_TCP BIT(9) +#define MTK_PPE_TB_CFG_AGE_UDP BIT(10) +#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11) +#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12) +#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14) +#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) +#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) + +enum { + MTK_PPE_SCAN_MODE_DISABLED, + MTK_PPE_SCAN_MODE_CHECK_AGE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE, +}; + +enum { + MTK_PPE_KEEPALIVE_DISABLE, + MTK_PPE_KEEPALIVE_UNICAST_CPU, + MTK_PPE_KEEPALIVE_DUP_CPU = 3, +}; + +enum { + MTK_PPE_SEARCH_MISS_ACTION_DROP, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3, +}; + +#define MTK_PPE_TB_BASE 0x220 + +#define MTK_PPE_TB_USED 0x224 +#define MTK_PPE_TB_USED_NUM GENMASK(13, 0) + +#define MTK_PPE_BIND_RATE 0x228 +#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0) +#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16) + +#define MTK_PPE_BIND_LIMIT0 0x22c +#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0) +#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16) + +#define MTK_PPE_BIND_LIMIT1 0x230 +#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0) +#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16) + +#define MTK_PPE_KEEPALIVE 0x234 +#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0) +#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16) +#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24) + +#define MTK_PPE_UNBIND_AGE 0x238 +#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16) +#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0) + +#define MTK_PPE_BIND_AGE0 0x23c +#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16) +#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0) + +#define MTK_PPE_BIND_AGE1 0x240 +#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16) +#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0) + +#define MTK_PPE_HASH_SEED 0x244 + +#define MTK_PPE_DEFAULT_CPU_PORT 0x248 +#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4)) + +#define MTK_PPE_MTU_DROP 0x308 + +#define MTK_PPE_VLAN_MTU0 0x30c +#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0) +#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16) + +#define 
MTK_PPE_VLAN_MTU1 0x310 +#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0) +#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16) + +#define MTK_PPE_VPM_TPID 0x318 + +#define MTK_PPE_CACHE_CTL 0x320 +#define MTK_PPE_CACHE_CTL_EN BIT(0) +#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4) +#define MTK_PPE_CACHE_CTL_REQ BIT(8) +#define MTK_PPE_CACHE_CTL_CLEAR BIT(9) +#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12) + +#define MTK_PPE_MIB_CFG 0x334 +#define MTK_PPE_MIB_CFG_EN BIT(0) +#define MTK_PPE_MIB_CFG_RD_CLR BIT(1) + +#define MTK_PPE_MIB_TB_BASE 0x338 + +#define MTK_PPE_MIB_CACHE_CTL 0x350 +#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0) +#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index c678344d22a2..8d751383530b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2241,43 +2241,52 @@ void mlx4_master_comm_channel(struct work_struct *work) struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); struct mlx4_dev *dev = &priv->dev; - __be32 *bit_vec; + u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; + u32 nmbr_bits; u32 comm_cmd; - u32 vec; - int i, j, slave; + int i, slave; int toggle; + bool first = true; int served = 0; int reported = 0; u32 slt; - bit_vec = master->comm_arm_bit_vector; - for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) { - vec = be32_to_cpu(bit_vec[i]); - for (j = 0; j < 32; j++) { - if (!(vec & (1 << j))) - continue; - ++reported; - slave = (i * 32) + j; - comm_cmd = swab32(readl( - &mfunc->comm[slave].slave_write)); - slt = swab32(readl(&mfunc->comm[slave].slave_read)) - >> 31; - toggle = comm_cmd >> 31; - if (toggle != slt) { - if (master->slave_state[slave].comm_toggle - != slt) { - pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n", - slave, slt, - master->slave_state[slave].comm_toggle); - master->slave_state[slave].comm_toggle = - slt; - } - mlx4_master_do_cmd(dev, slave, - comm_cmd >> 16 & 0xff, - comm_cmd & 0xffff, toggle); - ++served; + for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) + lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]); + nmbr_bits = dev->persist->num_vfs + 1; + if (++master->next_slave >= nmbr_bits) + master->next_slave = 0; + slave = master->next_slave; + while (true) { + slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave); + if (!first && slave >= master->next_slave) + break; + if (slave == nmbr_bits) { + if (!first) + break; + first = false; + slave = 0; + continue; + } + ++reported; + comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write)); + slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31; + toggle = comm_cmd >> 31; + if (toggle != slt) { + if (master->slave_state[slave].comm_toggle + != slt) { + pr_info("slave %d out of sync. read toggle %d, state toggle %d. 
Resynching.\n", + slave, slt, + master->slave_state[slave].comm_toggle); + master->slave_state[slave].comm_toggle = + slt; } + mlx4_master_do_cmd(dev, slave, + comm_cmd >> 16 & 0xff, + comm_cmd & 0xffff, toggle); + ++served; } + slave++; } if (reported && reported != served) @@ -2389,6 +2398,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) if (!priv->mfunc.master.vf_oper) goto err_comm_oper; + priv->mfunc.master.next_slave = 0; + for (i = 0; i < dev->num_slaves; ++i) { vf_admin = &priv->mfunc.master.vf_admin[i]; vf_oper = &priv->mfunc.master.vf_oper[i]; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 64bed7ac3836..6ccf340660d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -603,6 +603,7 @@ struct mlx4_mfunc_master_ctx { struct mlx4_slave_event_eq slave_eq; struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1]; + u32 next_slave; /* mlx4_master_comm_channel */ }; struct mlx4_mfunc { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 9d623e38d783..461a43f338e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -104,6 +104,18 @@ config MLX5_TC_CT If unsure, set to Y +config MLX5_TC_SAMPLE + bool "MLX5 TC sample offload support" + depends on MLX5_CLS_ACT + default y + help + Say Y here if you want to support offloading sample rules via tc + sample action. + If set to N, will not be able to configure tc rules with sample + action. + + If unsure, set to Y + config MLX5_CORE_EN_DCB bool "Data Center Bridging (DCB) Support" default y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 8cb2625472c3..a1223e904190 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -27,7 +27,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \ - en/qos.o en/trap.o + en/qos.o en/trap.o en/fs_tt_redirect.o # # Netdev extra @@ -37,9 +37,10 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \ - en_rep.o en/rep/bond.o en/mod_hdr.o + en_rep.o en/rep/bond.o en/mod_hdr.o \ + en/mapping.o mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ - en/mapping.o lib/fs_chains.o en/tc_tun.o \ + lib/fs_chains.o en/tc_tun.o \ esw/indir_table.o en/tc_tun_encap.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o @@ -49,11 +50,12 @@ mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o # Core extra # mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ - ecpf.o rdma.o + ecpf.o rdma.o esw/legacy.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \ - esw/devlink_port.o + esw/devlink_port.o esw/vporttbl.o +mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o 
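[Editor's illustrative sketch, not part of the patch: the round-robin bitmap walk introduced by the mlx4_master_comm_channel() rework above, distilled into a standalone form. Starting one slot past the last-serviced slave, the armed-slave bitmap is scanned once with find_next_bit(), wrapping at the end of the range so that low-numbered slaves cannot starve the rest; the service call is a placeholder.]

static void example_round_robin_scan(const unsigned long *bitmap, u32 nbits,
				     u32 *next_slave)
{
	bool first = true;
	u32 slave;

	if (++(*next_slave) >= nbits)
		*next_slave = 0;
	slave = *next_slave;

	while (true) {
		slave = find_next_bit(bitmap, nbits, slave);
		if (!first && slave >= *next_slave)
			break;			/* scanned the full range once */
		if (slave == nbits) {		/* reached the end of the bitmap */
			if (!first)
				break;
			first = false;		/* wrap around and continue from 0 */
			slave = 0;
			continue;
		}

		/* service_slave(slave); -- placeholder for the real per-slave work */

		slave++;
	}
}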
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e8cecd50558d..9d79c5ec31e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -263,15 +263,15 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent) return 0; } -static void dump_buf(void *buf, int size, int data_only, int offset) +static void dump_buf(void *buf, int size, int data_only, int offset, int idx) { __be32 *p = buf; int i; for (i = 0; i < size; i += 16) { - pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), - be32_to_cpu(p[1]), be32_to_cpu(p[2]), - be32_to_cpu(p[3])); + pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset, + be32_to_cpu(p[0]), be32_to_cpu(p[1]), + be32_to_cpu(p[2]), be32_to_cpu(p[3])); p += 4; offset += 16; } @@ -802,39 +802,41 @@ static void dump_command(struct mlx5_core_dev *dev, int dump_len; int i; + mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx); data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); if (data_only) mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, - "dump command data %s(0x%x) %s\n", - mlx5_command_str(op), op, + "cmd[%d]: dump command data %s(0x%x) %s\n", + ent->idx, mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); else - mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", - mlx5_command_str(op), op, + mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n", + ent->idx, mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); if (data_only) { if (input) { - dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); + dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx); offset += sizeof(ent->lay->in); } else { - dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); + dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx); offset += sizeof(ent->lay->out); } } else { - dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); + dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx); offset += sizeof(*ent->lay); } for (i = 0; i < n && next; i++) { if (data_only) { dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); - dump_buf(next->buf, dump_len, 1, offset); + dump_buf(next->buf, dump_len, 1, offset, ent->idx); offset += MLX5_CMD_DATA_BLOCK_SIZE; } else { - mlx5_core_dbg(dev, "command block:\n"); - dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); + mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx); + dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset, + ent->idx); offset += sizeof(struct mlx5_cmd_prot_block); } next = next->next; @@ -842,6 +844,8 @@ static void dump_command(struct mlx5_core_dev *dev, if (data_only) pr_debug("\n"); + + mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); } static u16 msg_to_opcode(struct mlx5_cmd_msg *in) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 9153c9bda96f..a9166cd85013 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -58,9 +58,6 @@ static bool is_eth_supported(struct mlx5_core_dev *dev) if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) return false; - if (is_eth_rep_supported(dev)) - return false; - if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d0f9d3cee97d..44c458443428 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -137,18 +137,18 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, * unregistering devlink instance while holding devlink_mutext. * Hence, do not support reload. */ - NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n"); + NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated"); return -EOPNOTSUPP; } if (mlx5_lag_is_active(dev)) { - NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n"); + NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode"); return -EOPNOTSUPP; } switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); return 0; case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) @@ -170,13 +170,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a *actions_performed = BIT(action); switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: - return mlx5_load_one(dev, false); + return mlx5_load_one(dev); case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) break; /* On fw_activate action, also driver is reloaded and reinit performed */ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); - return mlx5_load_one(dev, false); + return mlx5_load_one(dev); default: /* Unsupported action should not get to this function */ WARN_ON(1); @@ -461,6 +461,50 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id return 0; } + +static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!MLX5_ESWITCH_MANAGER(dev)) + return -EOPNOTSUPP; + + return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool); +} + +static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!MLX5_ESWITCH_MANAGER(dev)) + return -EOPNOTSUPP; + + ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch); + return 0; +} + +static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u8 esw_mode; + + if (!MLX5_ESWITCH_MANAGER(dev)) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported"); + return -EOPNOTSUPP; + } + esw_mode = mlx5_eswitch_mode(dev); + if (esw_mode == MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_MOD(extack, + "E-Switch must either disabled or non switchdev mode"); + return -EBUSY; + } + return 0; +} + #endif static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id, @@ -495,6 +539,12 @@ static const struct devlink_param mlx5_devlink_params[] = { BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL, mlx5_devlink_large_group_num_validate), + DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, + "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlx5_devlink_esw_port_metadata_get, + mlx5_devlink_esw_port_metadata_set, + mlx5_devlink_esw_port_metadata_validate), #endif DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME), mlx5_devlink_enable_remote_dev_reset_get, @@ -524,6 +574,18 @@ static void 
mlx5_devlink_set_params_init_values(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, value); + + if (MLX5_ESWITCH_MANAGER(dev)) { + if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) { + dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + value.vbool = true; + } else { + value.vbool = false; + } + devlink_param_driverinit_value_set(devlink, + MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, + value); + } #endif } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h index eff107dad922..7318d44b774b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h @@ -10,6 +10,7 @@ enum mlx5_devlink_param_id { MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, + MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, }; struct mlx5_trap_ctx { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 2eb022ad7fd0..01a1d02dcf15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1100,7 +1100,7 @@ int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer) int err; if (IS_ERR_OR_NULL(tracer)) - return -EINVAL; + return 0; dev = tracer->dev; mlx5_fw_tracer_cleanup(tracer); @@ -1126,8 +1126,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void switch (eqe->sub_type) { case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE: - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) - queue_work(tracer->work_queue, &tracer->ownership_change_work); + queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: if (likely(tracer->str_db.loaded)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bc6f77ea0a31..b636d63358d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -269,6 +269,7 @@ struct mlx5e_params { struct mlx5e_xsk *xsk; unsigned int sw_mtu; int hard_mtu; + bool ptp_rx; }; enum { @@ -324,9 +325,9 @@ enum { MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, - MLX5E_SQ_STATE_TLS, MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, MLX5E_SQ_STATE_PENDING_XSK_TX, + MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, }; struct mlx5e_tx_mpwqe { @@ -499,6 +500,8 @@ struct mlx5e_xdpsq { struct mlx5e_channel *channel; } ____cacheline_aligned_in_smp; +struct mlx5e_ktls_resync_resp; + struct mlx5e_icosq { /* data path */ u16 cc; @@ -518,6 +521,7 @@ struct mlx5e_icosq { u32 sqn; u16 reserved_room; unsigned long state; + struct mlx5e_ktls_resync_resp *ktls_resync; /* control path */ struct mlx5_wq_ctrl wq_ctrl; @@ -708,11 +712,11 @@ struct mlx5e_channel { int cpu; }; -struct mlx5e_port_ptp; +struct mlx5e_ptp; struct mlx5e_channels { struct mlx5e_channel **c; - struct mlx5e_port_ptp *port_ptp; + struct mlx5e_ptp *ptp; unsigned int num; struct mlx5e_params params; }; @@ -727,10 +731,11 @@ struct mlx5e_channel_stats { struct mlx5e_xdpsq_stats xsksq; } ____cacheline_aligned_in_smp; -struct mlx5e_port_ptp_stats { +struct mlx5e_ptp_stats { struct mlx5e_ch_stats ch; struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC]; + struct mlx5e_rq_stats rq; } 
____cacheline_aligned_in_smp; enum { @@ -837,6 +842,7 @@ struct mlx5e_priv { struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_tir ptp_tir; struct mlx5e_rss_params rss_params; u32 tx_rates[MLX5E_MAX_NUM_SQS]; @@ -856,10 +862,11 @@ struct mlx5e_priv { struct mlx5e_stats stats; struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; struct mlx5e_channel_stats trap_stats; - struct mlx5e_port_ptp_stats port_ptp_stats; + struct mlx5e_ptp_stats ptp_stats; u16 max_nch; u8 max_opened_tc; - bool port_ptp_opened; + bool tx_ptp_opened; + bool rx_ptp_opened; struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; @@ -882,7 +889,6 @@ struct mlx5e_priv { #endif struct devlink_health_reporter *tx_reporter; struct devlink_health_reporter *rx_reporter; - struct devlink_port dl_port; struct mlx5e_xsk xsk; #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) struct mlx5e_hv_vhca_stats_agent stats_agent; @@ -916,13 +922,12 @@ struct mlx5e_profile { const struct mlx5e_rx_handlers *rx_handlers; int max_tc; u8 rq_groups; + bool rx_ptp_support; }; void mlx5e_build_ptys2ethtool_map(void); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); -bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, - struct mlx5e_params *params); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); @@ -965,9 +970,9 @@ struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types struct mlx5e_xsk_param; struct mlx5e_rq_param; -int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, - struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk, - struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq); +int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, + struct mlx5e_xsk_param *xsk, int node, + struct mlx5e_rq *rq); int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); void mlx5e_close_rq(struct mlx5e_rq *rq); @@ -1013,27 +1018,20 @@ int fn##_ctx(struct mlx5e_priv *priv, void *context) \ return fn(priv); \ } int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); -int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_preactivate preactivate, - void *context); +int mlx5e_safe_switch_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params, + mlx5e_fp_preactivate preactivate, + void *context, bool reset); int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); int mlx5e_num_channels_changed(struct mlx5e_priv *priv); int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); +int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx); void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); -void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); - -void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); -void 
mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params); int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); @@ -1092,10 +1090,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); -void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); +void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); @@ -1176,10 +1174,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, const struct mlx5e_profile *new_profile, void *new_ppriv); +void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); -void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params); void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, u16 num_channels); void mlx5e_rx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index a69c62d72d16..0dd7615e5931 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -2,37 +2,70 @@ /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ #include "en/devlink.h" +#include "eswitch.h" + +static void +mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) +{ + u64 parent_id; + + parent_id = mlx5_query_nic_system_image_guid(dev); + ppid->id_len = sizeof(parent_id); + memcpy(ppid->id, &parent_id, sizeof(parent_id)); +} int mlx5e_devlink_port_register(struct mlx5e_priv *priv) { struct devlink *devlink = priv_to_devlink(priv->mdev); struct devlink_port_attrs attrs = {}; + struct netdev_phys_item_id ppid = {}; + struct devlink_port *dl_port; + unsigned int dl_port_index; if (mlx5_core_is_pf(priv->mdev)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn); + if (MLX5_ESWITCH_MANAGER(priv->mdev)) { + mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid); + memcpy(attrs.switch_id.id, ppid.id, ppid.id_len); + attrs.switch_id.id_len = ppid.id_len; + } + dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, + MLX5_VPORT_UPLINK); } else { attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL; + dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0); } - devlink_port_attrs_set(&priv->dl_port, &attrs); + dl_port = mlx5e_devlink_get_dl_port(priv); + memset(dl_port, 0, sizeof(*dl_port)); + devlink_port_attrs_set(dl_port, &attrs); - return devlink_port_register(devlink, &priv->dl_port, 1); + return devlink_port_register(devlink, dl_port, dl_port_index); } void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv) { - devlink_port_type_eth_set(&priv->dl_port, priv->netdev); + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); + + devlink_port_type_eth_set(dl_port, priv->netdev); } void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) { - devlink_port_unregister(&priv->dl_port); + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); + + if (dl_port->registered) + devlink_port_unregister(dl_port); } struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); + struct devlink_port *port; - return &priv->dl_port; + port = mlx5e_devlink_get_dl_port(priv); + if (port->registered) + return port; + return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h index 83123a801adc..10b50feb9883 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h @@ -12,4 +12,10 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv); void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv); struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev); +static inline struct devlink_port * +mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv) +{ + return &priv->mdev->mlx5e_res.dl_port; +} + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index a16297e7e2ac..1d5ce07b83f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -29,6 +29,7 @@ struct mlx5e_tc_table { struct netdev_net_notifier netdevice_nn; struct mlx5_tc_ct_priv *ct; + struct mapping_ctx *mapping; }; struct mlx5e_flow_table { @@ -49,18 +50,10 @@ struct mlx5e_promisc_table { struct mlx5_flow_handle *rule; }; -struct mlx5e_vlan_table { - struct mlx5e_flow_table ft; - DECLARE_BITMAP(active_cvlans, VLAN_N_VID); - DECLARE_BITMAP(active_svlans, VLAN_N_VID); - struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; - struct 
mlx5_flow_handle *active_svlans_rule[VLAN_N_VID]; - struct mlx5_flow_handle *untagged_rule; - struct mlx5_flow_handle *any_cvlan_rule; - struct mlx5_flow_handle *any_svlan_rule; - struct mlx5_flow_handle *trap_rule; - bool cvlan_filter_disabled; -}; +/* Forward declaration and APIs to get private fields of vlan_table */ +struct mlx5e_vlan_table; +unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan); +struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan); struct mlx5e_l2_table { struct mlx5e_flow_table ft; @@ -137,11 +130,13 @@ enum { MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, MLX5E_INNER_TTC_FT_LEVEL, + MLX5E_FS_TT_UDP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, + MLX5E_FS_TT_ANY_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, #ifdef CONFIG_MLX5_EN_TLS - MLX5E_ACCEL_FS_TCP_FT_LEVEL, + MLX5E_ACCEL_FS_TCP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, #endif #ifdef CONFIG_MLX5_EN_ARFS - MLX5E_ARFS_FT_LEVEL, + MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, #endif #ifdef CONFIG_MLX5_EN_IPSEC MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, @@ -198,31 +193,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev, #endif /* CONFIG_MLX5_EN_RXNFC */ #ifdef CONFIG_MLX5_EN_ARFS -#define ARFS_HASH_SHIFT BITS_PER_BYTE -#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) - -struct arfs_table { - struct mlx5e_flow_table ft; - struct mlx5_flow_handle *default_rule; - struct hlist_head rules_hash[ARFS_HASH_SIZE]; -}; - -enum arfs_type { - ARFS_IPV4_TCP, - ARFS_IPV6_TCP, - ARFS_IPV4_UDP, - ARFS_IPV6_UDP, - ARFS_NUM_TYPES, -}; - -struct mlx5e_arfs_tables { - struct arfs_table arfs_tables[ARFS_NUM_TYPES]; - /* Protect aRFS rules list */ - spinlock_t arfs_lock; - struct list_head rules; - int last_filter_id; - struct workqueue_struct *wq; -}; +struct mlx5e_arfs_tables; int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); @@ -241,6 +212,10 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU struct mlx5e_accel_fs_tcp; #endif +struct mlx5e_fs_udp; +struct mlx5e_fs_any; +struct mlx5e_ptp_fs; + struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; struct mlx5_flow_namespace *egress_ns; @@ -249,16 +224,19 @@ struct mlx5e_flow_steering { #endif struct mlx5e_tc_table tc; struct mlx5e_promisc_table promisc; - struct mlx5e_vlan_table vlan; + struct mlx5e_vlan_table *vlan; struct mlx5e_l2_table l2; struct mlx5e_ttc_table ttc; struct mlx5e_ttc_table inner_ttc; #ifdef CONFIG_MLX5_EN_ARFS - struct mlx5e_arfs_tables arfs; + struct mlx5e_arfs_tables *arfs; #endif #ifdef CONFIG_MLX5_EN_TLS struct mlx5e_accel_fs_tcp *accel_tcp; #endif + struct mlx5e_fs_udp *udp; + struct mlx5e_fs_any *any; + struct mlx5e_ptp_fs *ptp_fs; }; struct ttc_params { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c new file mode 100644 index 000000000000..909faa6c89d7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -0,0 +1,605 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#include <linux/netdevice.h> +#include "en/fs_tt_redirect.h" +#include "fs_core.h" + +enum fs_udp_type { + FS_IPV4_UDP, + FS_IPV6_UDP, + FS_UDP_NUM_TYPES, +}; + +struct mlx5e_fs_udp { + struct mlx5e_flow_table tables[FS_UDP_NUM_TYPES]; + struct mlx5_flow_handle *default_rules[FS_UDP_NUM_TYPES]; + int ref_cnt; +}; + +struct mlx5e_fs_any { + struct mlx5e_flow_table table; + struct mlx5_flow_handle *default_rule; + int ref_cnt; +}; + +static char *fs_udp_type2str(enum fs_udp_type i) +{ + switch (i) { + case FS_IPV4_UDP: + return "UDP v4"; + default: /* FS_IPV6_UDP */ + return "UDP v6"; + } +} + +static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i) +{ + switch (i) { + case FS_IPV4_UDP: + return MLX5E_TT_IPV4_UDP; + default: /* FS_IPV6_UDP */ + return MLX5E_TT_IPV6_UDP; + } +} + +static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i) +{ + switch (i) { + case MLX5E_TT_IPV4_UDP: + return FS_IPV4_UDP; + case MLX5E_TT_IPV6_UDP: + return FS_IPV6_UDP; + default: + return FS_UDP_NUM_TYPES; + } +} + +void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule) +{ + mlx5_del_flow_rules(rule); +} + +static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type type, + u16 udp_dport) +{ + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, + type == FS_IPV4_UDP ? 4 : 6); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, udp_dport); +} + +struct mlx5_flow_handle * +mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, + enum mlx5e_traffic_types ttc_type, + u32 tir_num, u16 d_port) +{ + enum fs_udp_type type = tt2fs_udp(ttc_type); + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_table *ft = NULL; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + struct mlx5e_fs_udp *fs_udp; + int err; + + if (type == FS_UDP_NUM_TYPES) + return ERR_PTR(-EINVAL); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + fs_udp = priv->fs.udp; + ft = fs_udp->tables[type].t; + + fs_udp_set_dport_flow(spec, type, d_port); + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = tir_num; + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + kvfree(spec); + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n", + __func__, fs_udp_type2str(type), err); + } + return rule; +} + +static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type) +{ + struct mlx5e_flow_table *fs_udp_t; + struct mlx5_flow_destination dest; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5e_fs_udp *fs_udp; + int err; + + fs_udp = priv->fs.udp; + fs_udp_t = &fs_udp->tables[type]; + + dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type)); + rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, + "%s: add default rule failed, fs type=%d, err %d\n", + __func__, type, err); + return err; + } + + fs_udp->default_rules[type] = rule; + return 0; +} + +#define 
MLX5E_FS_UDP_NUM_GROUPS (2) +#define MLX5E_FS_UDP_GROUP1_SIZE (BIT(16)) +#define MLX5E_FS_UDP_GROUP2_SIZE (BIT(0)) +#define MLX5E_FS_UDP_TABLE_SIZE (MLX5E_FS_UDP_GROUP1_SIZE +\ + MLX5E_FS_UDP_GROUP2_SIZE) +static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in || !ft->g) { + kfree(ft->g); + kvfree(in); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version); + + switch (type) { + case FS_IPV4_UDP: + case FS_IPV6_UDP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport); + break; + default: + err = -EINVAL; + goto out; + } + /* Match on udp protocol, Ipv4/6 and dport */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_FS_UDP_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Default Flow Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_FS_UDP_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; +out: + kvfree(in); + + return err; +} + +static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type) +{ + struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type]; + struct mlx5_flow_table_attr ft_attr = {}; + int err; + + ft->num_groups = 0; + + ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE; + ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n", + fs_udp_type2str(type), ft->t->id, ft->t->level); + + err = fs_udp_create_groups(ft, type); + if (err) + goto err; + + err = fs_udp_add_default_rule(priv, type); + if (err) + goto err; + + return 0; + +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i) +{ + if (IS_ERR_OR_NULL(fs_udp->tables[i].t)) + return; + + mlx5_del_flow_rules(fs_udp->default_rules[i]); + mlx5e_destroy_flow_table(&fs_udp->tables[i]); + fs_udp->tables[i].t = NULL; +} + +static int fs_udp_disable(struct mlx5e_priv *priv) +{ + int err, i; + + for (i = 0; i < FS_UDP_NUM_TYPES; i++) { + /* Modify ttc rules destination to point back to the indir TIRs */ + err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i)); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc[%d] default destination failed, err(%d)\n", + __func__, fs_udp2tt(i), err); + return err; + } + } + + return 0; +} + +static int fs_udp_enable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest = {}; + int err, i; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + for (i = 0; 
i < FS_UDP_NUM_TYPES; i++) { + dest.ft = priv->fs.udp->tables[i].t; + + /* Modify ttc rules destination to point on the accel_fs FTs */ + err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc[%d] destination to accel failed, err(%d)\n", + __func__, fs_udp2tt(i), err); + return err; + } + } + return 0; +} + +void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv) +{ + struct mlx5e_fs_udp *fs_udp = priv->fs.udp; + int i; + + if (!fs_udp) + return; + + if (--fs_udp->ref_cnt) + return; + + fs_udp_disable(priv); + + for (i = 0; i < FS_UDP_NUM_TYPES; i++) + fs_udp_destroy_table(fs_udp, i); + + kfree(fs_udp); + priv->fs.udp = NULL; +} + +int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv) +{ + int i, err; + + if (priv->fs.udp) { + priv->fs.udp->ref_cnt++; + return 0; + } + + priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL); + if (!priv->fs.udp) + return -ENOMEM; + + for (i = 0; i < FS_UDP_NUM_TYPES; i++) { + err = fs_udp_create_table(priv, i); + if (err) + goto err_destroy_tables; + } + + err = fs_udp_enable(priv); + if (err) + goto err_destroy_tables; + + priv->fs.udp->ref_cnt = 1; + + return 0; + +err_destroy_tables: + while (--i >= 0) + fs_udp_destroy_table(priv->fs.udp, i); + + kfree(priv->fs.udp); + priv->fs.udp = NULL; + return err; +} + +static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_type) +{ + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ether_type); +} + +struct mlx5_flow_handle * +mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv, + u32 tir_num, u16 ether_type) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_table *ft = NULL; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + struct mlx5e_fs_any *fs_any; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + fs_any = priv->fs.any; + ft = fs_any->table.t; + + fs_any_set_ethertype_flow(spec, ether_type); + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = tir_num; + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + kvfree(spec); + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n", + __func__, err); + } + return rule; +} + +static int fs_any_add_default_rule(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *fs_any_t; + struct mlx5_flow_destination dest; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5e_fs_any *fs_any; + int err; + + fs_any = priv->fs.any; + fs_any_t = &fs_any->table; + + dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY); + rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, + "%s: add default rule failed, fs type=ANY, err %d\n", + __func__, err); + return err; + } + + fs_any->default_rule = rule; + return 0; +} + +#define MLX5E_FS_ANY_NUM_GROUPS (2) +#define MLX5E_FS_ANY_GROUP1_SIZE (BIT(16)) +#define MLX5E_FS_ANY_GROUP2_SIZE (BIT(0)) +#define MLX5E_FS_ANY_TABLE_SIZE (MLX5E_FS_ANY_GROUP1_SIZE +\ + MLX5E_FS_ANY_GROUP2_SIZE) + +static int fs_any_create_groups(struct mlx5e_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g 
= kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in || !ft->g) { + kfree(ft->g); + kvfree(in); + return -ENOMEM; + } + + /* Match on ethertype */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_FS_ANY_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Default Flow Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_FS_ANY_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static int fs_any_create_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fs.any->table; + struct mlx5_flow_table_attr ft_attr = {}; + int err; + + ft->num_groups = 0; + + ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE; + ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n", + ft->t->id, ft->t->level); + + err = fs_any_create_groups(ft); + if (err) + goto err; + + err = fs_any_add_default_rule(priv); + if (err) + goto err; + + return 0; + +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +static int fs_any_disable(struct mlx5e_priv *priv) +{ + int err; + + /* Modify ttc rules destination to point back to the indir TIRs */ + err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc[%d] default destination failed, err(%d)\n", + __func__, MLX5E_TT_ANY, err); + return err; + } + return 0; +} + +static int fs_any_enable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest = {}; + int err; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.any->table.t; + + /* Modify ttc rules destination to point on the accel_fs FTs */ + err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc[%d] destination to accel failed, err(%d)\n", + __func__, MLX5E_TT_ANY, err); + return err; + } + return 0; +} + +static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any) +{ + if (IS_ERR_OR_NULL(fs_any->table.t)) + return; + + mlx5_del_flow_rules(fs_any->default_rule); + mlx5e_destroy_flow_table(&fs_any->table); + fs_any->table.t = NULL; +} + +void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv) +{ + struct mlx5e_fs_any *fs_any = priv->fs.any; + + if (!fs_any) + return; + + if (--fs_any->ref_cnt) + return; + + fs_any_disable(priv); + + fs_any_destroy_table(fs_any); + + kfree(fs_any); + priv->fs.any = NULL; +} + +int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv) +{ + int err; + + if (priv->fs.any) { + priv->fs.any->ref_cnt++; + return 0; + } + + priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL); + if (!priv->fs.any) + return -ENOMEM; + + err = 
fs_any_create_table(priv); + if (err) + return err; + + err = fs_any_enable(priv); + if (err) + goto err_destroy_table; + + priv->fs.any->ref_cnt = 1; + + return 0; + +err_destroy_table: + fs_any_destroy_table(priv->fs.any); + + kfree(priv->fs.any); + priv->fs.any = NULL; + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h new file mode 100644 index 000000000000..8385df24eb99 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies. */ + +#ifndef __MLX5E_FS_TT_REDIRECT_H__ +#define __MLX5E_FS_TT_REDIRECT_H__ + +#include "en.h" +#include "en/fs.h" + +void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule); + +/* UDP traffic type redirect */ +struct mlx5_flow_handle * +mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, + enum mlx5e_traffic_types ttc_type, + u32 tir_num, u16 d_port); +void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv); +int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv); + +/* ANY traffic type redirect*/ +struct mlx5_flow_handle * +mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv, + u32 tir_num, u16 ether_type); +void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv); +int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 84e501e057b4..6f4e6c34b2a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -128,7 +128,7 @@ int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg if (err) return err; - err = devlink_fmsg_u32_pair_put(fmsg, "size", eq->core.nent); + err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core)); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 36381a2ed5a5..f410c1268422 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -3,10 +3,13 @@ #include "en/params.h" #include "en/txrx.h" -#include "en_accel/tls_rxtx.h" +#include "en/port.h" +#include "en_accel/en_accel.h" +#include "accel/ipsec.h" +#include "fpga/ipsec.h" -static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk) +static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) { return params->xdp_prog || xsk; } @@ -37,8 +40,8 @@ u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, return linear_rq_headroom + hw_mtu; } -u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk) +static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) { u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk); @@ -87,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params, return !params->lro_en && linear_frag_sz <= PAGE_SIZE; } -#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ - MLX5_MPWQE_LOG_STRIDE_SZ_BASE) -bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk) +bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev, + u8 log_stride_sz, 
u8 log_num_strides) { - u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk); - s8 signed_log_num_strides_param; - u8 log_num_strides; + if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ) + return false; - if (!mlx5e_rx_is_linear_skb(params, xsk)) + if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE || + log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX) return false; - if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ) + if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX) return false; if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) - return true; + return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE; + + return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE; +} - log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); - signed_log_num_strides_param = - (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE; +bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) +{ + s8 log_num_strides; + u8 log_stride_sz; - return signed_log_num_strides_param >= 0; + if (!mlx5e_rx_is_linear_skb(params, xsk)) + return false; + + log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk)); + log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz; + + return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides); } u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params, @@ -172,17 +184,505 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par return stop_room; } -int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params) +int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { size_t sq_size = 1 << params->log_sq_size; u16 stop_room; - stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params); + stop_room = mlx5e_calc_sq_stop_room(mdev, params); if (stop_room >= sq_size) { - netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n", - stop_room, sq_size); + mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n", + stop_room, sq_size); return -EINVAL; } return 0; } + +static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) +{ + struct dim_cq_moder moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) +{ + struct dim_cq_moder moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) +{ + return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; +} + +void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) +{ + if (params->tx_dim_enabled) { + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); + + params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); + } else { + params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); + } +} + +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) +{ + if (params->rx_dim_enabled) { + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); + + params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); + } else { + params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); + } +} + +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + mlx5e_reset_tx_moderation(params, cq_period_mode); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + mlx5e_reset_rx_moderation(params, cq_period_mode); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, + params->rx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); +} + +bool slow_pci_heuristic(struct mlx5_core_dev *mdev) +{ + u32 link_speed = 0; + u32 pci_bw = 0; + + mlx5e_port_max_linkspeed(mdev, &link_speed); + pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); + mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", + link_speed, pci_bw); + +#define MLX5E_SLOW_PCI_RATIO (2) + + return link_speed && pci_bw && + link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; +} + +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) + return false; + + if (mlx5_fpga_is_ipsec_device(mdev)) + return false; + + if (params->xdp_prog) { + /* XSK params are not considered here. If striding RQ is in use, + * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will + * be called with the known XSK params. + */ + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) + return false; + } + + return true; +} + +void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + params->log_rq_mtu_frames = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + + mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? + BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) : + BIT(params->log_rq_mtu_frames), + BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)), + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); +} + +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +{ + params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : + MLX5_WQ_TYPE_CYCLIC; +} + +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + /* Prefer Striding RQ, unless any of the following holds: + * - Striding RQ configuration is not possible/supported. + * - Slow PCI heuristic. + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 
+ * + * No XSK params: checking the availability of striding RQ in general. + */ + if (!slow_pci_heuristic(mdev) && + mlx5e_striding_rq_possible(mdev, params) && + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) || + !mlx5e_rx_is_linear_skb(params, NULL))) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params); +} + +/* Build queue parameters */ + +void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) +{ + *ccp = (struct mlx5e_create_cq_param) { + .napi = &c->napi, + .ch_stats = c->stats, + .node = cpu_to_node(c->cpu), + .ix = c->ix, + }; +} + +#define DEFAULT_FRAG_SIZE (2048) + +static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, + struct mlx5e_rq_frags_info *info) +{ + u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); + int frag_size_max = DEFAULT_FRAG_SIZE; + u32 buf_size = 0; + int i; + + if (mlx5_fpga_is_ipsec_device(mdev)) + byte_count += MLX5E_METADATA_ETHER_LEN; + + if (mlx5e_rx_is_linear_skb(params, xsk)) { + int frag_stride; + + frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk); + frag_stride = roundup_pow_of_two(frag_stride); + + info->arr[0].frag_size = byte_count; + info->arr[0].frag_stride = frag_stride; + info->num_frags = 1; + info->wqe_bulk = PAGE_SIZE / frag_stride; + goto out; + } + + if (byte_count > PAGE_SIZE + + (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max) + frag_size_max = PAGE_SIZE; + + i = 0; + while (buf_size < byte_count) { + int frag_size = byte_count - buf_size; + + if (i < MLX5E_MAX_RX_FRAGS - 1) + frag_size = min(frag_size, frag_size_max); + + info->arr[i].frag_size = frag_size; + info->arr[i].frag_stride = roundup_pow_of_two(frag_size); + + buf_size += frag_size; + i++; + } + info->num_frags = i; + /* number of different wqes sharing a page */ + info->wqe_bulk = 1 + (info->num_frags % 2); + +out: + info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); + info->log_num_frags = order_base_2(info->num_frags); +} + +static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) +{ + int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; + + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + sz += sizeof(struct mlx5e_rx_wqe_ll); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + sz += sizeof(struct mlx5e_rx_wqe_cyc); + } + + return order_base_2(sz); +} + +static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128) + MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); +} + +static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, + struct mlx5e_cq_param *param) +{ + bool hw_stridx = false; + void *cqc = param->cqc; + u8 log_cq_size; + + switch (params->rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) + + mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); + hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + log_cq_size = params->log_rq_mtu_frames; + } + + MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { + MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ? 
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM); + MLX5_SET(cqc, cqc, cqe_comp_en, 1); + } + + mlx5e_build_common_cq_param(mdev, param); + param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; +} + +int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, + u16 q_counter, + struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + int ndsegs = 1; + + switch (params->rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: { + u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); + u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); + + if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size, + log_wqe_num_of_strides)) { + mlx5_core_err(mdev, + "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n", + log_wqe_stride_size, log_wqe_num_of_strides); + return -EINVAL; + } + + MLX5_SET(wq, wq, log_wqe_num_of_strides, + log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE); + MLX5_SET(wq, wq, log_wqe_stride_size, + log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); + MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk)); + break; + } + default: /* MLX5_WQ_TYPE_CYCLIC */ + MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); + mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info); + ndsegs = param->frags_info.num_frags; + } + + MLX5_SET(wq, wq, wq_type, params->rq_wq_type); + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); + MLX5_SET(rqc, rqc, counter_set_id, q_counter); + MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); + MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); + + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); + mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp); + + return 0; +} + +void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, + u16 q_counter, + struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); + MLX5_SET(rqc, rqc, counter_set_id, q_counter); + + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); +} + +void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size); + + mlx5e_build_common_cq_param(mdev, param); + param->cq_period_mode = params->tx_cq_moderation.cq_period_mode; +} + +void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); + + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); +} + +void mlx5e_build_sq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + bool allow_swp; + + allow_swp = mlx5_geneve_tx_allowed(mdev) || + !!MLX5_IPSEC_DEV(mdev); + mlx5e_build_sq_param_common(mdev, param); + MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); + MLX5_SET(sqc, sqc, allow_swp,
allow_swp); + param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); + param->stop_room = mlx5e_calc_sq_stop_room(mdev, params); + mlx5e_build_tx_cq_param(mdev, params, &param->cqp); +} + +static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev, + u8 log_wq_size, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); + + mlx5e_build_common_cq_param(mdev, param); + + param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; +} + +static u8 mlx5e_get_rq_log_wq_sz(void *rqc) +{ + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + return MLX5_GET(wq, wq, log_wq_sz); +} + +static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, + struct mlx5e_rq_param *rqp) +{ + switch (params->rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, + order_base_2(MLX5E_UMR_WQEBBS) + + mlx5e_get_rq_log_wq_sz(rqp->rqc)); + default: /* MLX5_WQ_TYPE_CYCLIC */ + return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + } +} + +static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev) +{ + if (mlx5_accel_is_ktls_rx(mdev)) + return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + + return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; +} + +static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev, + u8 log_wq_size, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(mdev, param); + + MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq)); + mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp); +} + +static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev, + u8 log_wq_size, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(mdev, param); + param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */ + param->is_tls = mlx5_accel_is_ktls_rx(mdev); + if (param->is_tls) + param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */ + MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq)); + MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp); +} + +void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(mdev, param); + MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); + param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); + mlx5e_build_tx_cq_param(mdev, params, &param->cqp); +} + +int mlx5e_build_channel_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + u16 q_counter, + struct mlx5e_channel_param *cparam) +{ + u8 icosq_log_wq_sz, async_icosq_log_wq_sz; + int err; + + err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq); + if (err) + return err; + + icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); + async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev); + + mlx5e_build_sq_param(mdev, params, &cparam->txq_sq); + mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq); + mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq); + mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index ea2cfb04b31a..e9593f5f0661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -30,6 +30,7 @@ struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; bool is_mpw; + bool is_tls; u16 stop_room; }; @@ -84,12 +85,23 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile, /* Parameter calculations */ +void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); + +bool slow_pci_heuristic(struct mlx5_core_dev *mdev); +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); + +bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev, + u8 log_stride_sz, u8 log_num_strides); u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); -u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk); u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params, @@ -112,32 +124,31 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, /* Build queue parameters */ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c); -void mlx5e_build_rq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk, - struct mlx5e_rq_param *param); -void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, +int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, + u16 q_counter, + struct mlx5e_rq_param *param); +void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, + u16 q_counter, + struct mlx5e_rq_param *param); +void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev, struct mlx5e_sq_param *param); -void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, +void mlx5e_build_sq_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, struct mlx5e_sq_param *param); -void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk, - struct mlx5e_cq_param *param); -void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, +void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_cq_param *param); -void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, - u8 log_wq_size, - struct mlx5e_cq_param *param); -void mlx5e_build_icosq_param(struct mlx5e_priv *priv, - u8 log_wq_size, - struct mlx5e_sq_param *param); -void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, +void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_sq_param *param); +int mlx5e_build_channel_param(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + u16 q_counter, + struct mlx5e_channel_param *cparam); u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params 
*params); -int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params); +int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); #endif /* __MLX5_EN_PARAMS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index d57b6f06382f..d907c1acd4d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -1,8 +1,26 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2020 Mellanox Technologies +#include <linux/ptp_classify.h> #include "en/ptp.h" #include "en/txrx.h" +#include "en/params.h" +#include "en/fs_tt_redirect.h" + +struct mlx5e_ptp_fs { + struct mlx5_flow_handle *l2_rule; + struct mlx5_flow_handle *udp_v4_rule; + struct mlx5_flow_handle *udp_v6_rule; + bool valid; +}; + +#define MLX5E_PTP_CHANNEL_IX 0 + +struct mlx5e_ptp_params { + struct mlx5e_params params; + struct mlx5e_sq_param txq_sq_param; + struct mlx5e_rq_param rq_param; +}; struct mlx5e_skb_cb_hwtstamp { ktime_t cqe_hwtstamp; @@ -116,9 +134,9 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget) { - struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp, - napi); + struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi); struct mlx5e_ch_stats *ch_stats = c->stats; + struct mlx5e_rq *rq = &c->rq; bool busy = false; int work_done = 0; int i; @@ -127,9 +145,19 @@ static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget) ch_stats->poll++; - for (i = 0; i < c->num_tc; i++) { - busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget); - busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget); + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + for (i = 0; i < c->num_tc; i++) { + busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget); + busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget); + } + } + if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) { + work_done = mlx5e_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + busy |= INDIRECT_CALL_2(rq->post_wqes, + mlx5e_post_rx_mpwqes, + mlx5e_post_rx_wqes, + rq); } if (busy) { @@ -142,10 +170,14 @@ static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget) ch_stats->arm++; - for (i = 0; i < c->num_tc; i++) { - mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq); - mlx5e_cq_arm(&c->ptpsq[i].ts_cq); + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + for (i = 0; i < c->num_tc; i++) { + mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq); + mlx5e_cq_arm(&c->ptpsq[i].ts_cq); + } } + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + mlx5e_cq_arm(&rq->cq); out: rcu_read_unlock(); @@ -153,7 +185,7 @@ out: return work_done; } -static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix, +static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_txqsq *sq, int tc, @@ -172,20 +204,18 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix, sq->netdev = c->netdev; sq->priv = c->priv; sq->mdev = mdev; - sq->ch_ix = c->ix; + sq->ch_ix = MLX5E_PTP_CHANNEL_IX; sq->txq_ix = txq_ix; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - sq->stats = &c->priv->port_ptp_stats.sq[tc]; + sq->stats = &c->priv->ptp_stats.sq[tc]; sq->ptpsq = ptpsq; 
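The hunk that follows replaces the per-call-site choice between the real-time clock and the free-running timecounter with a single mlx5_sq_ts_translator() call. The helper itself is defined outside this excerpt; the sketch below only restates the ternary that the removed lines contained, and the cqe_ts_to_ns typedef name is an assumption, not taken from this patch.

static inline cqe_ts_to_ns mlx5_sq_ts_translator(struct mlx5_core_dev *mdev)
{
	/* Pick the CQE timestamp translator once at SQ creation time:
	 * real-time clock when the device supports it, otherwise the
	 * free-running timecounter, as the removed ternary did inline.
	 */
	return mlx5_is_real_time_sq(mdev) ? mlx5_real_time_cyc2time :
					    mlx5_timecounter_cyc2time;
}
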
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); sq->stop_room = param->stop_room; - sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ? - mlx5_real_time_cyc2time : - mlx5_timecounter_cyc2time; + sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev); node = dev_to_node(mlx5_core_dma_dev(mdev)); @@ -243,7 +273,7 @@ static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo) kvfree(skb_fifo->fifo); } -static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn, +static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn, int txq_ix, struct mlx5e_ptp_params *cparams, int tc, struct mlx5e_ptpsq *ptpsq) { @@ -293,7 +323,7 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq) mlx5e_free_txqsq(sq); } -static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c, +static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c, struct mlx5e_ptp_params *cparams) { struct mlx5e_params *params = &cparams->params; @@ -321,7 +351,7 @@ close_txqsq: return err; } -static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c) +static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c) { int tc; @@ -329,8 +359,8 @@ static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c) mlx5e_ptp_close_txqsq(&c->ptpsq[tc]); } -static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c, - struct mlx5e_ptp_params *cparams) +static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, + struct mlx5e_ptp_params *cparams) { struct mlx5e_params *params = &cparams->params; struct mlx5e_create_cq_param ccp = {}; @@ -342,7 +372,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c, ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); ccp.ch_stats = c->stats; ccp.napi = &c->napi; - ccp.ix = c->ix; + ccp.ix = MLX5E_PTP_CHANNEL_IX; cq_param = &cparams->txq_sq_param.cqp; @@ -362,7 +392,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c, if (err) goto out_err_ts_cq; - ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc]; + ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc]; } return 0; @@ -378,7 +408,25 @@ out_err_txqsq_cq: return err; } -static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c) +static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + struct mlx5e_create_cq_param ccp = {}; + struct dim_cq_moder ptp_moder = {}; + struct mlx5e_cq_param *cq_param; + struct mlx5e_cq *cq = &c->rq.cq; + + ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); + ccp.ch_stats = c->stats; + ccp.napi = &c->napi; + ccp.ix = MLX5E_PTP_CHANNEL_IX; + + cq_param = &cparams->rq_param.cqp; + + return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); +} + +static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c) { int tc; @@ -389,22 +437,36 @@ static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c) mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); } -static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv, +static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq; - mlx5e_build_sq_param_common(priv, param); + mlx5e_build_sq_param_common(mdev, param); wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); - mlx5e_build_tx_cq_param(priv, params, &param->cqp); + mlx5e_build_tx_cq_param(mdev, params, &param->cqp); +} + +static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev, + struct net_device *netdev, + u16 q_counter, + 
struct mlx5e_ptp_params *ptp_params) +{ + struct mlx5e_rq_param *rq_params = &ptp_params->rq_param; + struct mlx5e_params *params = &ptp_params->params; + + params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; + mlx5e_init_rq_type_params(mdev, params); + params->sw_mtu = netdev->max_mtu; + mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params); } -static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c, +static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, struct mlx5e_ptp_params *cparams, struct mlx5e_params *orig) { @@ -417,52 +479,193 @@ static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c, params->num_tc = orig->num_tc; /* SQ */ - params->log_sq_size = orig->log_sq_size; - - mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param); + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + params->log_sq_size = orig->log_sq_size; + mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param); + } + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams); } -static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c, - struct mlx5e_ptp_params *cparams) +static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, + struct mlx5e_rq *rq) { + struct mlx5_core_dev *mdev = c->mdev; + struct mlx5e_priv *priv = c->priv; int err; - err = mlx5e_ptp_open_cqs(c, cparams); + rq->wq_type = params->rq_wq_type; + rq->pdev = mdev->device; + rq->netdev = priv->netdev; + rq->priv = priv; + rq->clock = &mdev->clock; + rq->tstamp = &priv->tstamp; + rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->stats = &c->priv->ptp_stats.rq; + rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); + err = mlx5e_rq_set_handlers(rq, params, false); if (err) return err; - err = mlx5e_ptp_open_txqsqs(c, cparams); + return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); +} + +static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param) +{ + int node = dev_to_node(c->mdev->device); + int err; + + err = mlx5e_init_ptp_rq(c, params, &c->rq); if (err) - goto close_cqs; + return err; + + return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq); +} + +static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + int err; + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + err = mlx5e_ptp_open_tx_cqs(c, cparams); + if (err) + return err; + + err = mlx5e_ptp_open_txqsqs(c, cparams); + if (err) + goto close_tx_cqs; + } + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { + err = mlx5e_ptp_open_rx_cq(c, cparams); + if (err) + goto close_txqsq; + + err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param); + if (err) + goto close_rx_cq; + } return 0; -close_cqs: - mlx5e_ptp_close_cqs(c); +close_rx_cq: + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + mlx5e_close_cq(&c->rq.cq); +close_txqsq: + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) + mlx5e_ptp_close_txqsqs(c); +close_tx_cqs: + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) + mlx5e_ptp_close_tx_cqs(c); return err; } -static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c) +static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c) { - mlx5e_ptp_close_txqsqs(c); - mlx5e_ptp_close_cqs(c); + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { + mlx5e_close_rq(&c->rq); + mlx5e_close_cq(&c->rq.cq); + } + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + mlx5e_ptp_close_txqsqs(c); + mlx5e_ptp_close_tx_cqs(c); + } } -int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params 
*params, - u8 lag_port, struct mlx5e_port_ptp **cp) +static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params) +{ + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS)) + __set_bit(MLX5E_PTP_STATE_TX, c->state); + + if (params->ptp_rx) + __set_bit(MLX5E_PTP_STATE_RX, c->state); + + return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0; +} + +static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv) +{ + struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; + + if (!ptp_fs->valid) + return; + + mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule); + mlx5e_fs_tt_redirect_any_destroy(priv); + + mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule); + mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule); + mlx5e_fs_tt_redirect_udp_destroy(priv); + ptp_fs->valid = false; +} + +static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) +{ + struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; + struct mlx5_flow_handle *rule; + u32 tirn = priv->ptp_tir.tirn; + int err; + + if (ptp_fs->valid) + return 0; + + err = mlx5e_fs_tt_redirect_udp_create(priv); + if (err) + goto out_free; + + rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP, + tirn, PTP_EV_PORT); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto out_destroy_fs_udp; + } + ptp_fs->udp_v4_rule = rule; + + rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP, + tirn, PTP_EV_PORT); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto out_destroy_udp_v4_rule; + } + ptp_fs->udp_v6_rule = rule; + + err = mlx5e_fs_tt_redirect_any_create(priv); + if (err) + goto out_destroy_udp_v6_rule; + + rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto out_destroy_fs_any; + } + ptp_fs->l2_rule = rule; + ptp_fs->valid = true; + + return 0; + +out_destroy_fs_any: + mlx5e_fs_tt_redirect_any_destroy(priv); +out_destroy_udp_v6_rule: + mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule); +out_destroy_udp_v4_rule: + mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule); +out_destroy_fs_udp: + mlx5e_fs_tt_redirect_udp_destroy(priv); +out_free: + return err; +} + +int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, + u8 lag_port, struct mlx5e_ptp **cp) { struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_ptp_params *cparams; - struct mlx5e_port_ptp *c; - unsigned int irq; + struct mlx5e_ptp *c; int err; - int eqn; - err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq); - if (err) - return err; c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev))); cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL); @@ -472,14 +675,17 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->priv = priv; c->mdev = priv->mdev; c->tstamp = &priv->tstamp; - c->ix = 0; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); c->num_tc = params->num_tc; - c->stats = &priv->port_ptp_stats.ch; + c->stats = &priv->ptp_stats.ch; c->lag_port = lag_port; + err = mlx5e_ptp_set_state(c, params); + if (err) + goto err_free; + netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64); mlx5e_ptp_build_params(c, cparams, params); @@ -488,6 +694,9 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, if (unlikely(err)) goto err_napi_del; + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + priv->rx_ptp_opened = true; + *cp = c; 
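For reference, the renamed channel API above is meant to be driven in open/activate/deactivate/close order. A minimal caller sketch, illustrative only (the real call sites are outside this excerpt and example_ptp_channel_cycle is a made-up name):

/* Illustrative caller only: bring the combined TX/RX PTP channel up and
 * tear it down again in reverse order.
 */
static int example_ptp_channel_cycle(struct mlx5e_priv *priv,
				     struct mlx5e_params *params, u8 lag_port)
{
	struct mlx5e_ptp *c;
	int err;

	err = mlx5e_ptp_open(priv, params, lag_port, &c);
	if (err)
		return err;

	/* Enables NAPI, the per-TC SQs and, when MLX5E_PTP_STATE_RX is set,
	 * the PTP RQ together with its steering rules.
	 */
	mlx5e_ptp_activate_channel(c);

	mlx5e_ptp_deactivate_channel(c);
	mlx5e_ptp_close(c);
	return 0;
}
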
kvfree(cparams); @@ -496,13 +705,13 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, err_napi_del: netif_napi_del(&c->napi); - +err_free: kvfree(cparams); kvfree(c); return err; } -void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c) +void mlx5e_ptp_close(struct mlx5e_ptp *c) { mlx5e_ptp_close_queues(c); netif_napi_del(&c->napi); @@ -510,22 +719,94 @@ void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c) kvfree(c); } -void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c) +void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c) { int tc; napi_enable(&c->napi); - for (tc = 0; tc < c->num_tc; tc++) - mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq); + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq); + } + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { + mlx5e_ptp_rx_set_fs(c->priv); + mlx5e_activate_rq(&c->rq); + } } -void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c) +void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c) { int tc; - for (tc = 0; tc < c->num_tc; tc++) - mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq); + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + mlx5e_deactivate_rq(&c->rq); + + if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq); + } napi_disable(&c->napi); } + +int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn) +{ + if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) + return -EINVAL; + + *rqn = c->rq.rqn; + return 0; +} + +int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv) +{ + struct mlx5e_ptp_fs *ptp_fs; + + if (!priv->profile->rx_ptp_support) + return 0; + + ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL); + if (!ptp_fs) + return -ENOMEM; + + priv->fs.ptp_fs = ptp_fs; + return 0; +} + +void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv) +{ + struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; + + if (!priv->profile->rx_ptp_support) + return; + + mlx5e_ptp_rx_unset_fs(priv); + kfree(ptp_fs); +} + +int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set) +{ + struct mlx5e_ptp *c = priv->channels.ptp; + + if (!priv->profile->rx_ptp_support) + return 0; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + if (set) { + if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) { + netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules"); + return -EINVAL; + } + return mlx5e_ptp_rx_set_fs(priv); + } + /* set == false */ + if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) { + netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules"); + return -EINVAL; + } + mlx5e_ptp_rx_unset_fs(priv); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index 90c98ea63b7f..ab935cce952b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -5,7 +5,6 @@ #define __MLX5_EN_PTP_H__ #include "en.h" -#include "en/params.h" #include "en_stats.h" struct mlx5e_ptpsq { @@ -17,9 +16,16 @@ struct mlx5e_ptpsq { struct mlx5e_ptp_cq_stats *cq_stats; }; -struct mlx5e_port_ptp { +enum { + MLX5E_PTP_STATE_TX, + MLX5E_PTP_STATE_RX, + MLX5E_PTP_STATE_NUM_STATES, +}; + +struct mlx5e_ptp { /* data path */ struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC]; + struct mlx5e_rq rq; struct napi_struct napi; struct device *pdev; struct net_device *netdev; @@ -34,20 +40,18 @@ struct mlx5e_port_ptp { struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; struct hwtstamp_config *tstamp; 
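The state bitmap that the next hunk adds to struct mlx5e_ptp carries only the TX and RX bits defined above. As a usage illustration only (not part of the patch, and the helper name is made up), a caller can combine that bit with the new mlx5e_ptp_get_rqn() to pick the RQ that PTP traffic should be steered to:

/* Illustrative helper: resolve the RQN for PTP steering, falling back to
 * a default RQN when the PTP channel has no RX queue (MLX5E_PTP_STATE_RX
 * not set) or was never opened.
 */
static u32 example_ptp_steering_rqn(struct mlx5e_priv *priv, u32 default_rqn)
{
	u32 rqn;

	if (mlx5e_ptp_get_rqn(priv->channels.ptp, &rqn))
		return default_rqn;

	return rqn;
}
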
- DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); - int ix; -}; - -struct mlx5e_ptp_params { - struct mlx5e_params params; - struct mlx5e_sq_param txq_sq_param; + DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES); }; -int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, - u8 lag_port, struct mlx5e_port_ptp **cp); -void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c); -void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c); -void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c); +int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, + u8 lag_port, struct mlx5e_ptp **cp); +void mlx5e_ptp_close(struct mlx5e_ptp *c); +void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c); +void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c); +int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn); +int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv); +void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv); +int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set); enum { MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 12d7ad061237..5efe3278b0f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -232,8 +232,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs memset(&param_sq, 0, sizeof(param_sq)); memset(&param_cq, 0, sizeof(param_cq)); - mlx5e_build_sq_param(priv, params, &param_sq); - mlx5e_build_tx_cq_param(priv, params, &param_cq); + mlx5e_build_sq_param(priv->mdev, params, &param_sq); + mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq); err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); if (err) goto err_free_sq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 065126370acd..6cdc52d50a48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -17,6 +17,7 @@ #include "en/mapping.h" #include "en/tc_tun.h" #include "lib/port_tun.h" +#include "esw/sample.h" struct mlx5e_rep_indr_block_priv { struct net_device *netdev; @@ -169,6 +170,9 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_priv *priv = cb_priv; + if (!priv->netdev || !netif_device_present(priv->netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); @@ -321,6 +325,9 @@ mlx5e_rep_indr_offload(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); int err = 0; + if (!netif_device_present(indr_priv->rpriv->netdev)) + return -EOPNOTSUPP; + switch (flower->command) { case FLOW_CLS_REPLACE: err = mlx5e_configure_flower(netdev, priv, flower, flags); @@ -605,27 +612,50 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, return true; } + +static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1, + struct mlx5e_tc_update_priv *tc_priv) +{ + struct mlx5e_priv *priv = netdev_priv(skb->dev); + u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET; + + if (chain) { + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct tc_skb_ext *tc_skb_ext; + struct mlx5_eswitch *esw; + u32 zone_restore_id; + + tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); + if (!tc_skb_ext) { + 
WARN_ON(1); + return false; + } + tc_skb_ext->chain = chain; + zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK; + esw = priv->mdev->priv.eswitch; + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb, + zone_restore_id)) + return false; + } + return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); +} #endif /* CONFIG_NET_TC_SKB_EXT */ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, struct mlx5e_tc_update_priv *tc_priv) { -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id; - struct mlx5_rep_uplink_priv *uplink_priv; - struct mlx5e_rep_priv *uplink_rpriv; - struct tc_skb_ext *tc_skb_ext; + struct mlx5_mapped_obj mapped_obj; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; + u32 reg_c0, reg_c1; int err; reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); - if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) - reg_c0 = 0; - reg_c1 = be32_to_cpu(cqe->ft_metadata); - - if (!reg_c0) + if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) return true; /* If reg_c0 is not equal to the default flow tag then skb->mark @@ -633,38 +663,33 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, */ skb->mark = 0; + reg_c1 = be32_to_cpu(cqe->ft_metadata); + priv = netdev_priv(skb->dev); esw = priv->mdev->priv.eswitch; - - err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain); + err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj); if (err) { netdev_dbg(priv->netdev, - "Couldn't find chain for chain tag: %d, err: %d\n", + "Couldn't find mapped object for reg_c0: %d, err: %d\n", reg_c0, err); return false; } - if (chain) { - tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); - if (!tc_skb_ext) { - WARN_ON(1); - return false; - } - - tc_skb_ext->chain = chain; - - zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK; - - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - uplink_priv = &uplink_rpriv->uplink_priv; - if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb, - zone_restore_id)) - return false; - } - - tunnel_id = reg_c1 >> ESW_TUN_OFFSET; - return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) + return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv); #endif /* CONFIG_NET_TC_SKB_EXT */ +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) { + mlx5_esw_sample_skb(skb, &mapped_obj); + return false; + } +#endif /* CONFIG_MLX5_TC_SAMPLE */ + if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE && + mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) { + netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); + return false; + } return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index d80bbd17e5f8..0eb125316fe2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -4,6 +4,8 @@ #include "health.h" #include "params.h" #include "txrx.h" +#include "devlink.h" +#include "ptp.h" static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) { @@ -229,8 +231,9 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, return mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, - struct 
devlink_fmsg *fmsg) +static int +mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { u16 wqe_counter; int wqes_sz; @@ -246,14 +249,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, wq_head = mlx5e_rqwq_get_head(rq); wqe_counter = mlx5e_rqwq_get_wqe_counter(rq); - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); if (err) return err; @@ -299,61 +294,155 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, return err; } - err = devlink_fmsg_obj_nest_end(fmsg); + return 0; +} + +static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) +{ + int err; + + err = devlink_fmsg_obj_nest_start(fmsg); if (err) return err; - return 0; + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); + if (err) + return err; + + err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); + if (err) + return err; + + return devlink_fmsg_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg, - struct netlink_ext_ack *extack) +static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - struct mlx5e_params *params = &priv->channels.params; - struct mlx5e_rq *generic_rq; + struct mlx5e_priv *priv = rq->priv; + struct mlx5e_params *params; u32 rq_stride, rq_sz; - int i, err = 0; - - mutex_lock(&priv->state_lock); - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; + bool real_time; + int err; - generic_rq = &priv->channels.c[0]->rq; - rq_sz = mlx5e_rqwq_get_size(generic_rq); + params = &priv->channels.params; + rq_sz = mlx5e_rqwq_get_size(rq); + real_time = mlx5_is_real_time_rq(priv->mdev); rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); - if (err) - goto unlock; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); if (err) - goto unlock; + return err; err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); if (err) - goto unlock; + return err; err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); if (err) - goto unlock; + return err; err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); if (err) - goto unlock; + return err; - err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg); + err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? 
"RT" : "FRC"); if (err) - goto unlock; + return err; - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg); + if (err) + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int +mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch, + struct devlink_fmsg *fmsg) +{ + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter); if (err) + return err; + + err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); + if (err) + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int +mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq; + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); + if (err) + return err; + + err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg); + if (err) + return err; + + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { + err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg); + if (err) + return err; + } + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) +{ + int err; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + if (err) + return err; + + err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; + int i, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg); if (err) goto unlock; @@ -368,9 +457,12 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, if (err) goto unlock; } + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { + err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg); + if (err) + goto unlock; + } err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - goto unlock; unlock: mutex_unlock(&priv->state_lock); return err; @@ -502,6 +594,7 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5_rsc_key key = {}; int i, err; @@ -534,6 +627,12 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, return err; } + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { + err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ"); + if (err) + return err; + } + return devlink_fmsg_arr_pair_nest_end(fmsg); } @@ -615,9 +714,10 @@ static const struct devlink_health_reporter_ops 
mlx5_rx_reporter_ops = { void mlx5e_reporter_rx_create(struct mlx5e_priv *priv) { + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); struct devlink_health_reporter *reporter; - reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops, + reporter = devlink_port_health_reporter_create(dl_port, &mlx5_rx_reporter_ops, MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv); if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", @@ -633,4 +733,5 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) return; devlink_port_health_reporter_destroy(priv->rx_reporter); + priv->rx_reporter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index d7275c84313e..9d361efd5ff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -3,6 +3,7 @@ #include "health.h" #include "en/ptp.h" +#include "en/devlink.h" static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { @@ -256,12 +257,14 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *txqsq) { u32 sq_stride, sq_sz; + bool real_time; int err; err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); if (err) return err; + real_time = mlx5_is_real_time_sq(txqsq->mdev); sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); sq_stride = MLX5_SEND_WQE_BB; @@ -273,6 +276,10 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, if (err) return err; + err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); + if (err) + return err; + err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); if (err) return err; @@ -303,6 +310,7 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5e_ptpsq *generic_ptpsq; int err; @@ -314,12 +322,11 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte if (err) return err; - generic_ptpsq = priv->channels.port_ptp ? 
- &priv->channels.port_ptp->ptpsq[0] : - NULL; - if (!generic_ptpsq) + if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto out; + generic_ptpsq = &ptp_ch->ptpsq[0]; + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); if (err) return err; @@ -345,7 +352,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp; + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; int i, tc, err = 0; @@ -374,7 +381,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, } } - if (!ptp_ch) + if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto close_sqs_nest; for (tc = 0; tc < priv->channels.params.num_tc; tc++) { @@ -459,7 +466,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { - struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp; + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5_rsc_key key = {}; int i, tc, err; @@ -496,7 +503,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, } } - if (ptp_ch) { + if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) { for (tc = 0; tc < priv->channels.params.num_tc; tc++) { struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; @@ -572,9 +579,10 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { void mlx5e_reporter_tx_create(struct mlx5e_priv *priv) { + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); struct devlink_health_reporter *reporter; - reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_tx_reporter_ops, + reporter = devlink_port_health_reporter_create(dl_port, &mlx5_tx_reporter_ops, MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv); if (IS_ERR(reporter)) { netdev_warn(priv->netdev, @@ -591,4 +599,5 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) return; devlink_port_health_reporter_destroy(priv->tx_reporter); + priv->tx_reporter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 68e54cc1cd16..5da5e5323a44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -29,6 +29,8 @@ #define MLX5_CT_STATE_TRK_BIT BIT(2) #define MLX5_CT_STATE_NAT_BIT BIT(3) #define MLX5_CT_STATE_REPLY_BIT BIT(4) +#define MLX5_CT_STATE_RELATED_BIT BIT(5) +#define MLX5_CT_STATE_INVALID_BIT BIT(6) #define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8) #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) @@ -717,7 +719,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, zone_rule->nat = nat; - spec = kzalloc(sizeof(*spec), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; @@ -759,7 +761,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, zone_rule->attr = attr; - kfree(spec); + kvfree(spec); ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone); return 0; @@ -771,7 +773,7 @@ err_rule: err_mod_hdr: kfree(attr); err_attr: - kfree(spec); + kvfree(spec); return err; } @@ -1229,8 +1231,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr, struct netlink_ext_ack *extack) { + bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv; struct flow_rule 
*rule = flow_cls_offload_flow_rule(f); - bool trk, est, untrk, unest, new, rpl, unrpl; struct flow_dissector_key_ct *mask, *key; u32 ctstate = 0, ctstate_mask = 0; u16 ct_state_on, ct_state_off; @@ -1258,7 +1260,9 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | TCA_FLOWER_KEY_CT_FLAGS_NEW | - TCA_FLOWER_KEY_CT_FLAGS_REPLY)) { + TCA_FLOWER_KEY_CT_FLAGS_REPLY | + TCA_FLOWER_KEY_CT_FLAGS_RELATED | + TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { NL_SET_ERR_MSG_MOD(extack, "only ct_state trk, est, new and rpl are supported for offload"); return -EOPNOTSUPP; @@ -1270,9 +1274,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW; est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY; + rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED; + inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID; untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY; + unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED; + uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID; ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0; ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0; @@ -1280,6 +1288,20 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0; ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0; ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0; + ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0; + ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0; + + if (rel) { + NL_SET_ERR_MSG_MOD(extack, + "matching on ct_state +rel isn't supported"); + return -EOPNOTSUPP; + } + + if (inv) { + NL_SET_ERR_MSG_MOD(extack, + "matching on ct_state +inv isn't supported"); + return -EOPNOTSUPP; + } if (new) { NL_SET_ERR_MSG_MOD(extack, @@ -1562,6 +1584,14 @@ mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft) mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct); } +/* To avoid false lock dependency warning set the ct_entries_ht lock + * class different than the lock class of the ht being used when deleting + * last flow from a group and then deleting a group, we get into del_sw_flow_group() + * which call rhashtable_destroy on fg->ftes_hash which will take ht->mutex but + * it's different than the ht->mutex here. 
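A minimal sketch of the lockdep-class split applied below: a file-scope struct lock_class_key plus lockdep_set_class() right after rhashtable_init() moves one table's internal mutex into its own class, so nesting it under another rhashtable's mutex is not flagged as recursive locking. The demo_* names are illustrative, not from this patch.

#include <linux/rhashtable.h>

static struct lock_class_key demo_ht_lock_key;	/* hypothetical */

static int demo_ht_init(struct rhashtable *ht,
			const struct rhashtable_params *params)
{
	int err = rhashtable_init(ht, params);

	if (err)
		return err;

	/* Give this table's mutex a class distinct from other rhashtables. */
	lockdep_set_class(&ht->mutex, &demo_ht_lock_key);
	return 0;
}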
+ */ +static struct lock_class_key ct_entries_ht_lock_key; + static struct mlx5_ct_ft * mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, struct nf_flowtable *nf_ft) @@ -1596,6 +1626,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, if (err) goto err_init; + lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key); + err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node, zone_params); if (err) @@ -1697,10 +1729,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft; u32 fte_id = 1; - post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL); + post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL); ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); if (!post_ct_spec || !ct_flow) { - kfree(post_ct_spec); + kvfree(post_ct_spec); kfree(ct_flow); return ERR_PTR(-ENOMEM); } @@ -1810,6 +1842,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, ct_flow->post_ct_attr->prio = 0; ct_flow->post_ct_attr->ft = ct_priv->post_ct; + /* Splits were handled before CT */ + if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) + ct_flow->post_ct_attr->esw_attr->split_count = 0; + ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE; ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE; ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); @@ -1835,7 +1871,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, attr->ct_attr.ct_flow = ct_flow; dealloc_mod_hdr_actions(&pre_mod_acts); - kfree(post_ct_spec); + kvfree(post_ct_spec); return rule; @@ -1856,7 +1892,7 @@ err_alloc_pre: err_idr: mlx5_tc_ct_del_ft_cb(ct_priv, ft); err_ft: - kfree(post_ct_spec); + kvfree(post_ct_spec); kfree(ct_flow); netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); return ERR_PTR(err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index c223591ffc22..d1599b7b944b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -27,6 +27,7 @@ enum { MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8, MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9, MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10, + MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11, }; struct mlx5e_tc_flow_parse_attr { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index e1271998b937..9350ca05ce65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -83,10 +83,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, static inline int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, - struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; } -int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct mlx5e_encap_entry *e) + struct mlx5e_encap_entry *e) +{ return -EOPNOTSUPP; } +static inline int +mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; } #endif int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 9f16ad2c0710..593503bc4d07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Mellanox Technologies. */ #include <net/fib_notifier.h> +#include <net/nexthop.h> #include "tc_tun_encap.h" #include "en_tc.h" #include "tc_tun.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 37fc1d77ded7..86ab4e864fe6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -30,172 +30,62 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget) return work_done; } -static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp, - struct mlx5e_rq_stats *stats, struct mlx5e_params *params, - struct mlx5e_ch_stats *ch_stats, +static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params, struct mlx5e_rq *rq) { - void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq); - struct mlx5_core_dev *mdev = priv->mdev; - struct page_pool_params pp_params = {}; - int node = dev_to_node(mdev->device); - u32 pool_size; - int wq_sz; - int err; - int i; - - rqp->wq.db_numa_node = node; - - rq->wq_type = params->rq_wq_type; - rq->pdev = mdev->device; - rq->netdev = priv->netdev; - rq->mdev = mdev; - rq->priv = priv; - rq->stats = stats; - rq->clock = &mdev->clock; - rq->tstamp = &priv->tstamp; - rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - + struct mlx5_core_dev *mdev = t->mdev; + struct mlx5e_priv *priv = t->priv; + + rq->wq_type = params->rq_wq_type; + rq->pdev = mdev->device; + rq->netdev = priv->netdev; + rq->priv = priv; + rq->clock = &mdev->clock; + rq->tstamp = &priv->tstamp; + rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->stats = &priv->trap_stats.rq; + rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); xdp_rxq_info_unused(&rq->xdp_rxq); - - rq->buff.map_dir = DMA_FROM_DEVICE; - rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL); - pool_size = 1 << params->log_rq_mtu_frames; - - err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); - if (err) - return err; - - rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; - - wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); - - rq->wqe.info = rqp->frags_info; - rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride; - rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags), - (wq_sz << rq->wqe.info.log_num_frags)), - GFP_KERNEL, node); - if (!rq->wqe.frags) { - err = -ENOMEM; - goto err_wq_cyc_destroy; - } - - err = mlx5e_init_di_list(rq, wq_sz, node); - if (err) - goto err_free_frags; - - rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); - mlx5e_rq_set_trap_handlers(rq, params); - - /* Create a page_pool and register it with rxq */ - pp_params.order = 0; - pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ - pp_params.pool_size = pool_size; - pp_params.nid = node; - pp_params.dev = mdev->device; - pp_params.dma_dir = rq->buff.map_dir; - - /* page_pool can be used even when there is no rq->xdp_prog, - * given page_pool does not handle DMA mapping there is no - * required state to clear. And page_pool gracefully handle - * elevated refcnt. 
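The open-coded page_pool setup being removed here is taken over by the generic mlx5e_open_rq() path. For reference, the core page_pool API the deleted lines relied on looks roughly like the sketch below; only the page_pool calls themselves are real, the wrapper name is hypothetical.

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

static struct page_pool *demo_create_pool(struct device *dev, int nid,
					  u32 pool_size)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.flags		= 0,		/* pool does no DMA mapping itself */
		.pool_size	= pool_size,
		.nid		= nid,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	/* Returns ERR_PTR() on failure, a usable pool otherwise. */
	return page_pool_create(&pp_params);
}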
- */ - rq->page_pool = page_pool_create(&pp_params); - if (IS_ERR(rq->page_pool)) { - err = PTR_ERR(rq->page_pool); - rq->page_pool = NULL; - goto err_free_di_list; - } - for (i = 0; i < wq_sz; i++) { - struct mlx5e_rx_wqe_cyc *wqe = - mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); - int f; - - for (f = 0; f < rq->wqe.info.num_frags; f++) { - u32 frag_size = rq->wqe.info.arr[f].frag_size | - MLX5_HW_START_PADDING; - - wqe->data[f].byte_count = cpu_to_be32(frag_size); - wqe->data[f].lkey = rq->mkey_be; - } - /* check if num_frags is not a pow of two */ - if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { - wqe->data[f].byte_count = 0; - wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY); - wqe->data[f].addr = 0; - } - } - return 0; - -err_free_di_list: - mlx5e_free_di_list(rq); -err_free_frags: - kvfree(rq->wqe.frags); -err_wq_cyc_destroy: - mlx5_wq_destroy(&rq->wq_ctrl); - - return err; } -static void mlx5e_free_trap_rq(struct mlx5e_rq *rq) -{ - page_pool_destroy(rq->page_pool); - mlx5e_free_di_list(rq); - kvfree(rq->wqe.frags); - mlx5_wq_destroy(&rq->wq_ctrl); -} - -static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi, - struct mlx5e_rq_stats *stats, struct mlx5e_params *params, - struct mlx5e_rq_param *rq_param, - struct mlx5e_ch_stats *ch_stats, - struct mlx5e_rq *rq) +static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t) { + struct mlx5e_rq_param *rq_param = &t->rq_param; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_create_cq_param ccp = {}; struct dim_cq_moder trap_moder = {}; - struct mlx5e_cq *cq = &rq->cq; + struct mlx5e_rq *rq = &t->rq; + int node; int err; - ccp.node = dev_to_node(mdev->device); - ccp.ch_stats = ch_stats; - ccp.napi = napi; + node = dev_to_node(mdev->device); + + ccp.node = node; + ccp.ch_stats = t->stats; + ccp.napi = &t->napi; ccp.ix = 0; - err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq); + err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq); if (err) return err; - err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq); + mlx5e_init_trap_rq(t, &t->params, rq); + err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq); if (err) goto err_destroy_cq; - err = mlx5e_create_rq(rq, rq_param); - if (err) - goto err_free_rq; - - err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); - if (err) - goto err_destroy_rq; - return 0; -err_destroy_rq: - mlx5e_destroy_rq(rq); - mlx5e_free_rx_descs(rq); -err_free_rq: - mlx5e_free_trap_rq(rq); err_destroy_cq: - mlx5e_close_cq(cq); + mlx5e_close_cq(&rq->cq); return err; } static void mlx5e_close_trap_rq(struct mlx5e_rq *rq) { - mlx5e_destroy_rq(rq); - mlx5e_free_rx_descs(rq); - mlx5e_free_trap_rq(rq); + mlx5e_close_rq(rq); mlx5e_close_cq(&rq->cq); } @@ -213,7 +103,7 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml return -ENOMEM; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); MLX5_SET(tirc, tirc, inline_rqn, rqn); @@ -228,24 +118,16 @@ static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_destroy_tir(mdev, tir); } -static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq) -{ - set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); -} - -static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq) -{ 
- clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); -} - -static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t) +static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev, + int max_mtu, u16 q_counter, + struct mlx5e_trap *t) { struct mlx5e_params *params = &t->params; params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; - mlx5e_init_rq_type_params(priv->mdev, params); - params->sw_mtu = priv->netdev->max_mtu; - mlx5e_build_rq_param(priv, params, NULL, &t->rq_param); + mlx5e_init_rq_type_params(mdev, params); + params->sw_mtu = max_mtu; + mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param); } static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) @@ -259,23 +141,19 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) if (!t) return ERR_PTR(-ENOMEM); - mlx5e_build_trap_params(priv, t); + mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t); t->priv = priv; t->mdev = priv->mdev; t->tstamp = &priv->tstamp; t->pdev = mlx5_core_dma_dev(priv->mdev); t->netdev = priv->netdev; - t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); t->stats = &priv->trap_stats.ch; netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64); - err = mlx5e_open_trap_rq(priv, &t->napi, - &priv->trap_stats.rq, - &t->params, &t->rq_param, - &priv->trap_stats.ch, - &t->rq); + err = mlx5e_open_trap_rq(priv, t); if (unlikely(err)) goto err_napi_del; @@ -304,15 +182,14 @@ void mlx5e_close_trap(struct mlx5e_trap *trap) static void mlx5e_activate_trap(struct mlx5e_trap *trap) { napi_enable(&trap->napi); - mlx5e_activate_trap_rq(&trap->rq); - napi_schedule(&trap->napi); + mlx5e_activate_rq(&trap->rq); } void mlx5e_deactivate_trap(struct mlx5e_priv *priv) { struct mlx5e_trap *trap = priv->en_trap; - mlx5e_deactivate_trap_rq(&trap->rq); + mlx5e_deactivate_rq(&trap->rq); napi_disable(&trap->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 2e3e78b0f333..2f0df5cc1a2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -500,7 +500,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_xdpsq *sq; - int drops = 0; + int nxmit = 0; int sq_num; int i; @@ -529,11 +529,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) { - xdp_return_frame_rx_napi(xdpf); - drops++; - continue; - } + if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) + break; xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME; xdpi.frame.xdpf = xdpf; @@ -544,9 +541,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (unlikely(!ret)) { dma_unmap_single(sq->pdev, xdptxd.dma_addr, xdptxd.len, DMA_TO_DEVICE); - xdp_return_frame_rx_napi(xdpf); - drops++; + break; } + nxmit++; } if (flags & XDP_XMIT_FLUSH) { @@ -555,7 +552,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, mlx5e_xmit_xdp_doorbell(sq); } - return n - drops; + return nxmit; } void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index f4bce1365639..a8315f166696 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -35,13 +35,59 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, } } -static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv, +static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, + u16 q_counter, struct mlx5e_channel_param *cparam) { - mlx5e_build_rq_param(priv, params, xsk, &cparam->rq); - mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); + mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq); + mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq); +} + +static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, + struct mlx5e_params *params, + struct xsk_buff_pool *pool, + struct mlx5e_xsk_param *xsk, + struct mlx5e_rq *rq) +{ + struct mlx5_core_dev *mdev = c->mdev; + int rq_xdp_ix; + int err; + + rq->wq_type = params->rq_wq_type; + rq->pdev = c->pdev; + rq->netdev = c->netdev; + rq->priv = c->priv; + rq->tstamp = c->tstamp; + rq->clock = &mdev->clock; + rq->icosq = &c->icosq; + rq->ix = c->ix; + rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->xdpsq = &c->rq_xdpsq; + rq->xsk_pool = pool; + rq->stats = &c->priv->channel_stats[c->ix].xskrq; + rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); + rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK; + err = mlx5e_rq_set_handlers(rq, params, xsk); + if (err) + return err; + + return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0); +} + +static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params, + struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool, + struct mlx5e_xsk_param *xsk) +{ + int err; + + err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq); + if (err) + return err; + + return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq); } int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, @@ -61,14 +107,14 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (!cparam) return -ENOMEM; - mlx5e_build_xsk_cparam(priv, params, xsk, cparam); + mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam); err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, &c->xskrq.cq); if (unlikely(err)) goto err_free_cparam; - err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq); + err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk); if (unlikely(err)) goto err_close_rx_cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index cc0efac7b812..00af0b831a28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -123,11 +123,10 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, mlx5e_udp_gso_handle_tx_skb(skb); #ifdef CONFIG_MLX5_EN_TLS - if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { - /* May send SKBs and WQEs. */ + /* May send SKBs and WQEs. 
*/ + if (mlx5e_tls_skb_offloaded(skb)) if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls))) return false; - } #endif #ifdef CONFIG_MLX5_EN_IPSEC @@ -186,7 +185,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq, struct mlx5_wqe_inline_seg *inlseg) { #ifdef CONFIG_MLX5_EN_TLS - mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls); + mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls); #endif #ifdef CONFIG_MLX5_EN_IPSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 381a9c8c9da9..34119ce92031 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -60,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec; int err = 0; - spec = kzalloc(sizeof(*spec), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; @@ -101,7 +101,7 @@ out: if (err) mlx5_modify_header_dealloc(mdev, modify_hdr); out_spec: - kfree(spec); + kvfree(spec); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index baa58b62e8df..aaa579bf9a39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -12,6 +12,9 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); int mlx5e_ktls_init_rx(struct mlx5e_priv *priv); void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv); int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable); +struct mlx5e_ktls_resync_resp * +mlx5e_ktls_rx_resync_create_resp_list(void); +void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list); #else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) @@ -33,6 +36,14 @@ static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enab return -EOPNOTSUPP; } +static inline struct mlx5e_ktls_resync_resp * +mlx5e_ktls_rx_resync_create_resp_list(void) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void +mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {} #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 19d22a63313f..4e58fade7a60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -56,6 +56,7 @@ struct mlx5e_ktls_offload_context_rx { /* resync */ struct mlx5e_ktls_rx_resync_ctx resync; + struct list_head list; }; static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx) @@ -72,6 +73,32 @@ static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx refcount_inc(&priv_rx->resync.refcnt); } +struct mlx5e_ktls_resync_resp { + /* protects list changes */ + spinlock_t lock; + struct list_head list; +}; + +void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) +{ + kvfree(resp_list); +} + +struct mlx5e_ktls_resync_resp * +mlx5e_ktls_rx_resync_create_resp_list(void) +{ + struct mlx5e_ktls_resync_resp *resp_list; + + resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL); + if (!resp_list) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&resp_list->list); + spin_lock_init(&resp_list->lock); + + return resp_list; +} + static int 
mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) { int err, inlen; @@ -85,7 +112,7 @@ static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); MLX5_SET(tirc, tirc, indirect_table, rqtn); @@ -119,8 +146,7 @@ out: complete(&priv_rx->add_ctx); } -static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv, - struct sock *sk) +static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv) { INIT_WORK(&rule->work, accel_rule_handle_work); rule->priv = priv; @@ -359,33 +385,32 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, /* Function can be called with the refcount being either elevated or not. * It does not affect the refcount. */ -static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, - struct mlx5e_channel *c) +static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, + struct mlx5e_channel *c) { struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info; - struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_ktls_resync_resp *ktls_resync; struct mlx5e_icosq *sq; - int err; + bool trigger_poll; memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); - err = 0; sq = &c->async_icosq; - spin_lock_bh(&c->async_icosq_lock); + ktls_resync = sq->ktls_resync; - cseg = post_static_params(sq, priv_rx); - if (IS_ERR(cseg)) { - priv_rx->rq_stats->tls_resync_res_skip++; - err = PTR_ERR(cseg); - goto unlock; - } - /* Do not increment priv_rx refcnt, CQE handling is empty */ - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); - priv_rx->rq_stats->tls_resync_res_ok++; -unlock: - spin_unlock_bh(&c->async_icosq_lock); + spin_lock_bh(&ktls_resync->lock); + list_add_tail(&priv_rx->list, &ktls_resync->list); + trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_unlock_bh(&ktls_resync->lock); - return err; + if (!trigger_poll) + return; + + if (!napi_if_scheduled_mark_missed(&c->napi)) { + spin_lock_bh(&c->async_icosq_lock); + mlx5e_trigger_irq(sq); + spin_unlock_bh(&c->async_icosq_lock); + } } /* Function can be called with the refcount being either elevated or not. 
@@ -618,7 +643,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, init_completion(&priv_rx->add_ctx); - accel_rule_init(&priv_rx->rule, priv, sk); + accel_rule_init(&priv_rx->rule, priv); resync = &priv_rx->resync; resync_init(resync, priv); tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core; @@ -676,3 +701,65 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) */ mlx5e_ktls_priv_rx_put(priv_rx); } + +bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp; + struct mlx5e_ktls_resync_resp *ktls_resync; + struct mlx5_wqe_ctrl_seg *db_cseg; + struct mlx5e_icosq *sq; + LIST_HEAD(local_list); + int i, j; + + sq = &c->async_icosq; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return false; + + ktls_resync = sq->ktls_resync; + db_cseg = NULL; + i = 0; + + spin_lock(&ktls_resync->lock); + list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) { + list_move(&priv_rx->list, &local_list); + if (++i == budget) + break; + } + if (list_empty(&ktls_resync->list)) + clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_unlock(&ktls_resync->lock); + + spin_lock(&c->async_icosq_lock); + for (j = 0; j < i; j++) { + struct mlx5_wqe_ctrl_seg *cseg; + + priv_rx = list_first_entry(&local_list, + struct mlx5e_ktls_offload_context_rx, + list); + cseg = post_static_params(sq, priv_rx); + if (IS_ERR(cseg)) + break; + list_del(&priv_rx->list); + db_cseg = cseg; + } + if (db_cseg) + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg); + spin_unlock(&c->async_icosq_lock); + + priv_rx->rq_stats->tls_resync_res_ok += j; + + if (!list_empty(&local_list)) { + /* This happens only if ICOSQ is full. + * There is no need to mark busy or explicitly ask for a NAPI cycle, + * it will be triggered by the outstanding ICOSQ completions. 
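The resync handler above follows the usual "detach under lock, process outside the lock" pattern: entries are moved onto an on-stack list with list_move(), the spinlock is dropped while they are handled, and any leftovers are spliced back. A generic sketch of that pattern, with a hypothetical entry type:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_entry {				/* hypothetical */
	struct list_head list;
};

static int demo_drain(spinlock_t *lock, struct list_head *queue, int budget)
{
	struct demo_entry *e, *tmp;
	LIST_HEAD(local);
	int done = 0;

	spin_lock(lock);
	list_for_each_entry_safe(e, tmp, queue, list) {
		list_move(&e->list, &local);
		if (++done == budget)
			break;
	}
	spin_unlock(lock);

	/* ...process the detached entries on 'local' without the lock... */

	if (!list_empty(&local)) {
		/* Put anything not processed back at the head of the queue. */
		spin_lock(lock);
		list_splice(&local, queue);
		spin_unlock(lock);
	}
	return done;
}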
+ */ + spin_lock(&ktls_resync->lock); + list_splice(&local_list, &ktls_resync->list); + set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_unlock(&ktls_resync->lock); + priv_rx->rq_stats->tls_resync_res_retry++; + } + + return i == budget; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index ee04e916fa21..8f79335057dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -40,6 +40,14 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, } return false; } + +bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget); + +static inline bool +mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) +{ + return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state); +} #else static inline bool mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, @@ -49,6 +57,18 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, return false; } +static inline bool +mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) +{ + return false; +} + +static inline bool +mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) +{ + return false; +} + #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_TXRX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 2b51d3222ca1..82dc09aaa7fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -263,9 +263,6 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, int datalen; u32 skb_seq; - if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)) - return true; - datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (!datalen) return true; @@ -301,12 +298,6 @@ err_out: return false; } -void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, - struct mlx5e_accel_tx_tls_state *state) -{ - cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); -} - static int tls_update_resync_sn(struct net_device *netdev, struct sk_buff *skb, struct mlx5e_tls_metadata *mdata) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 9923132c9440..0ca0a023fb8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -47,8 +47,18 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); -void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, - struct mlx5e_accel_tx_tls_state *state); + +static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb) +{ + return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); +} + +static inline void +mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, + struct mlx5e_accel_tx_tls_state *state) +{ + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); +} void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, u32 *cqe_bcnt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 39475f6565c7..5cd466ec6492 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -36,6 +36,32 @@ #include <linux/ipv6.h> #include "en.h" +#define ARFS_HASH_SHIFT BITS_PER_BYTE +#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) + +struct arfs_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_handle *default_rule; + struct hlist_head rules_hash[ARFS_HASH_SIZE]; +}; + +enum arfs_type { + ARFS_IPV4_TCP, + ARFS_IPV6_TCP, + ARFS_IPV4_UDP, + ARFS_IPV6_UDP, + ARFS_NUM_TYPES, +}; + +struct mlx5e_arfs_tables { + struct arfs_table arfs_tables[ARFS_NUM_TYPES]; + /* Protect aRFS rules list */ + spinlock_t arfs_lock; + struct list_head rules; + int last_filter_id; + struct workqueue_struct *wq; +}; + struct arfs_tuple { __be16 etype; u8 ip_proto; @@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; for (i = 0; i < ARFS_NUM_TYPES; i++) { - dest.ft = priv->fs.arfs.arfs_tables[i].ft.t; + dest.ft = priv->fs.arfs->arfs_tables[i].ft.t; /* Modify ttc rules destination to point on the aRFS FTs */ err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest); if (err) { @@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t) mlx5e_destroy_flow_table(&arfs_t->ft); } -void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) +static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv) { int i; - if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) - return; - arfs_del_rules(priv); - destroy_workqueue(priv->fs.arfs.wq); + destroy_workqueue(priv->fs.arfs->wq); for (i = 0; i < ARFS_NUM_TYPES; i++) { - if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t)) - arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]); + if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t)) + arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]); } } +void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) +{ + if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) + return; + + _mlx5e_cleanup_tables(priv); + kvfree(priv->fs.arfs); +} + static int arfs_add_default_rule(struct mlx5e_priv *priv, enum arfs_type type) { - struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; + struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type]; struct mlx5e_tir *tir = priv->indir_tir; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); @@ -290,7 +322,7 @@ out: static int arfs_create_table(struct mlx5e_priv *priv, enum arfs_type type) { - struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct mlx5e_arfs_tables *arfs = priv->fs.arfs; struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) return 0; - spin_lock_init(&priv->fs.arfs.arfs_lock); - INIT_LIST_HEAD(&priv->fs.arfs.rules); - priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs"); - if (!priv->fs.arfs.wq) + priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL); + if (!priv->fs.arfs) return -ENOMEM; + spin_lock_init(&priv->fs.arfs->arfs_lock); + INIT_LIST_HEAD(&priv->fs.arfs->rules); + priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs"); + if (!priv->fs.arfs->wq) + goto err; + for (i = 0; i < ARFS_NUM_TYPES; i++) { err = arfs_create_table(priv, i); if (err) - goto err; + goto err_des; } return 0; + +err_des: + _mlx5e_cleanup_tables(priv); err: - 
mlx5e_arfs_destroy_tables(priv); + kvfree(priv->fs.arfs); return err; } @@ -353,13 +392,13 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) { struct arfs_rule *arfs_rule; struct hlist_node *htmp; + HLIST_HEAD(del_list); int quota = 0; int i; int j; - HLIST_HEAD(del_list); - spin_lock_bh(&priv->fs.arfs.arfs_lock); - mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { + spin_lock_bh(&priv->fs.arfs->arfs_lock); + mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) { if (!work_pending(&arfs_rule->arfs_work) && rps_may_expire_flow(priv->netdev, arfs_rule->rxq, arfs_rule->flow_id, @@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) break; } } - spin_unlock_bh(&priv->fs.arfs.arfs_lock); + spin_unlock_bh(&priv->fs.arfs->arfs_lock); hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { if (arfs_rule->rule) mlx5_del_flow_rules(arfs_rule->rule); @@ -383,16 +422,16 @@ static void arfs_del_rules(struct mlx5e_priv *priv) { struct hlist_node *htmp; struct arfs_rule *rule; + HLIST_HEAD(del_list); int i; int j; - HLIST_HEAD(del_list); - spin_lock_bh(&priv->fs.arfs.arfs_lock); - mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) { + spin_lock_bh(&priv->fs.arfs->arfs_lock); + mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) { hlist_del_init(&rule->hlist); hlist_add_head(&rule->hlist, &del_list); } - spin_unlock_bh(&priv->fs.arfs.arfs_lock); + spin_unlock_bh(&priv->fs.arfs->arfs_lock); hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { cancel_work_sync(&rule->arfs_work); @@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, struct arfs_rule *arfs_rule) { - struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct mlx5e_arfs_tables *arfs = priv->fs.arfs; struct arfs_tuple *tuple = &arfs_rule->tuple; struct mlx5_flow_handle *rule = NULL; struct mlx5_flow_destination dest = {}; @@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work) mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - spin_lock_bh(&priv->fs.arfs.arfs_lock); + spin_lock_bh(&priv->fs.arfs->arfs_lock); hlist_del(&arfs_rule->hlist); - spin_unlock_bh(&priv->fs.arfs.arfs_lock); + spin_unlock_bh(&priv->fs.arfs->arfs_lock); mutex_unlock(&priv->state_lock); kfree(arfs_rule); @@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, tuple->dst_port = fk->ports.dst; rule->flow_id = flow_id; - rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; + rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER; hlist_add_head(&rule->hlist, arfs_hash_bucket(arfs_t, tuple->src_port, @@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct mlx5e_arfs_tables *arfs = priv->fs.arfs; struct arfs_table *arfs_t; struct arfs_rule *arfs_rule; struct flow_keys fk; @@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, return -ENOMEM; } } - queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work); + queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work); spin_unlock_bh(&arfs->arfs_lock); return arfs_rule->filter_id; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index a6cf008057b5..8c166ee56d8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,15 +38,16 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) { + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; int err; err = mlx5_core_create_tir(mdev, in, &tir->tirn); if (err) return err; - mutex_lock(&mdev->mlx5e_res.td.list_lock); - list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); - mutex_unlock(&mdev->mlx5e_res.td.list_lock); + mutex_lock(&res->td.list_lock); + list_add(&tir->list, &res->td.tirs_list); + mutex_unlock(&res->td.list_lock); return 0; } @@ -54,10 +55,12 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) { - mutex_lock(&mdev->mlx5e_res.td.list_lock); + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; + + mutex_lock(&res->td.list_lock); mlx5_core_destroy_tir(mdev, tir->tirn); list_del(&tir->list); - mutex_unlock(&mdev->mlx5e_res.td.list_lock); + mutex_unlock(&res->td.list_lock); } void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) @@ -99,7 +102,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) { - struct mlx5e_resources *res = &mdev->mlx5e_res; + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; int err; err = mlx5_core_alloc_pd(mdev, &res->pdn); @@ -126,8 +129,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) goto err_destroy_mkey; } - INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); - mutex_init(&mdev->mlx5e_res.td.list_lock); + INIT_LIST_HEAD(&res->td.tirs_list); + mutex_init(&res->td.list_lock); return 0; @@ -142,7 +145,7 @@ err_dealloc_pd: void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) { - struct mlx5e_resources *res = &mdev->mlx5e_res; + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; mlx5_free_bfreg(mdev, &res->bfreg); mlx5_core_destroy_mkey(mdev, &res->mkey); @@ -180,8 +183,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - mutex_lock(&mdev->mlx5e_res.td.list_lock); - list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { + mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock); + list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) { tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in); if (err) @@ -192,7 +195,7 @@ out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); - mutex_unlock(&mdev->mlx5e_res.td.list_lock); + mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index f23c67575073..a4c8d8d00d5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1149,35 +1149,23 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context) static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) { - struct mlx5e_channels new_channels = {}; - bool reset_channels = true; - bool opened; - int err = 0; + struct mlx5e_params new_params; + bool reset = true; + int err; mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; - 
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params, + new_params = priv->channels.params; + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params, trust_state); - opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (!opened) - reset_channels = false; - /* Skip if tx_min_inline is the same */ - if (new_channels.params.tx_min_inline_mode == - priv->channels.params.tx_min_inline_mode) - reset_channels = false; - - if (reset_channels) { - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_update_trust_state_hw, - &trust_state); - } else { - err = mlx5e_update_trust_state_hw(priv, &trust_state); - if (!err && !opened) - priv->channels.params = new_channels.params; - } + if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode) + reset = false; + + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_update_trust_state_hw, + &trust_state, reset); mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 53802e18af90..8360289813f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -34,6 +34,7 @@ #include "en/port.h" #include "en/params.h" #include "en/xsk/pool.h" +#include "en/ptp.h" #include "lib/clock.h" void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, @@ -325,7 +326,7 @@ static void mlx5e_get_ringparam(struct net_device *dev, int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; u8 log_rq_size; u8 log_sq_size; int err = 0; @@ -364,20 +365,15 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; - new_channels.params.log_rq_mtu_frames = log_rq_size; - new_channels.params.log_sq_size = log_sq_size; + new_params = priv->channels.params; + new_params.log_rq_mtu_frames = log_rq_size; + new_params.log_sq_size = log_sq_size; - err = mlx5e_validate_params(priv, &new_channels.params); + err = mlx5e_validate_params(priv->mdev, &new_params); if (err) goto unlock; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - goto unlock; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); unlock: mutex_unlock(&priv->state_lock); @@ -422,8 +418,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, { struct mlx5e_params *cur_params = &priv->channels.params; unsigned int count = ch->combined_count; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; bool arfs_enabled; + bool opened; int err = 0; if (!count) { @@ -458,28 +455,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } - new_channels.params = *cur_params; - new_channels.params.num_channels = count; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_params old_params; + new_params = *cur_params; + new_params.num_channels = count; - old_params = *cur_params; - *cur_params = new_channels.params; - err = mlx5e_num_channels_changed(priv); - if (err) - *cur_params = old_params; - - goto out; - } + opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE; + arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE); if (arfs_enabled) 
mlx5e_arfs_disable(priv); /* Switch to new channels, set new parameters and close old ones */ - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_num_channels_changed_ctx, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); if (arfs_enabled) { int err2 = mlx5e_arfs_enable(priv); @@ -574,8 +561,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, { struct dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; bool reset_rx, reset_tx; + bool reset = true; int err = 0; if (!MLX5_CAP_GEN(mdev, cq_moderation)) @@ -596,51 +584,47 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; + new_params = priv->channels.params; - rx_moder = &new_channels.params.rx_cq_moderation; + rx_moder = &new_params.rx_cq_moderation; rx_moder->usec = coal->rx_coalesce_usecs; rx_moder->pkts = coal->rx_max_coalesced_frames; - new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; - tx_moder = &new_channels.params.tx_cq_moderation; + tx_moder = &new_params.tx_cq_moderation; tx_moder->usec = coal->tx_coalesce_usecs; tx_moder->pkts = coal->tx_max_coalesced_frames; - new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; if (reset_rx) { - u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + u8 mode = MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER); - mlx5e_reset_rx_moderation(&new_channels.params, mode); + mlx5e_reset_rx_moderation(&new_params, mode); } if (reset_tx) { - u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + u8 mode = MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER); - mlx5e_reset_tx_moderation(&new_channels.params, mode); + mlx5e_reset_tx_moderation(&new_params, mode); } - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - goto out; - } - - if (!reset_rx && !reset_tx) { + /* If DIM state hasn't changed, it's possible to modify interrupt + * moderation parameters on the fly, even if the channels are open. 
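Across this file the old mlx5e_safe_switch_channels(priv, &new_channels, ...) calls become mlx5e_safe_switch_params(priv, &new_params, preactivate, context, reset), where the trailing reset flag says whether the channels must actually be recreated. A typical call site after this change looks roughly like the sketch below (demo_switch is a hypothetical wrapper, not part of the patch):

static int demo_switch(struct mlx5e_priv *priv)
{
	struct mlx5e_params new_params;
	bool reset = true;
	int err;

	mutex_lock(&priv->state_lock);
	new_params = priv->channels.params;
	/* ...adjust new_params; clear 'reset' when the change can be applied
	 * to open channels without recreating them...
	 */
	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
	mutex_unlock(&priv->state_lock);
	return err;
}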
+ */ + if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) { if (!coal->use_adaptive_rx_coalesce) mlx5e_set_priv_channels_rx_coalesce(priv, coal); if (!coal->use_adaptive_tx_coalesce) mlx5e_set_priv_channels_tx_coalesce(priv, coal); - priv->channels.params = new_channels.params; - goto out; + reset = false; } - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); -out: mutex_unlock(&priv->state_lock); return err; } @@ -1601,6 +1585,14 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return mlx5_set_port_wol(mdev, mlx5_wol_mode); } +static void mlx5e_get_fec_stats(struct net_device *netdev, + struct ethtool_fec_stats *fec_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_fec_get(priv, fec_stats); +} + static int mlx5e_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) { @@ -1769,6 +1761,49 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } +static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_module_eeprom_query_params query; + struct mlx5_core_dev *mdev = priv->mdev; + u8 *data = page_data->data; + int size_read; + int i = 0; + + if (!page_data->length) + return -EINVAL; + + memset(data, 0, page_data->length); + + query.offset = page_data->offset; + query.i2c_address = page_data->i2c_address; + query.bank = page_data->bank; + query.page = page_data->page; + while (i < page_data->length) { + query.size = page_data->length - i; + size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i); + + /* Done reading, return how many bytes was read */ + if (!size_read) + return i; + + if (size_read == -EINVAL) + return -EINVAL; + if (size_read < 0) { + netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n", + __func__, size_read); + return i; + } + + i += size_read; + query.offset += size_read; + } + + return i; +} + int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash) { @@ -1808,7 +1843,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; bool mode_changed; u8 cq_period_mode, current_cq_period_mode; @@ -1827,18 +1862,13 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, if (!mode_changed) return 0; - new_channels.params = priv->channels.params; + new_params = priv->channels.params; if (is_rx_cq) - mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode); + mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); else - mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode); + mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } - - return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) @@ -1854,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) int mlx5e_modify_rx_cqe_compression_locked(struct 
mlx5e_priv *priv, bool new_val) { bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; int err = 0; if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) @@ -1863,15 +1893,16 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val if (curr_val == new_val) return 0; - new_channels.params = priv->channels.params; - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + new_params = priv->channels.params; + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); + if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) + new_params.ptp_rx = new_val; - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + if (new_params.ptp_rx == priv->channels.params.ptp_rx) + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + else + err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx, + &new_params.ptp_rx, true); if (err) return err; @@ -1892,11 +1923,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) { - netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); - return -EINVAL; - } - err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); if (err) return err; @@ -1910,7 +1936,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; if (enable) { if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) @@ -1922,17 +1948,12 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return -EINVAL; } - new_channels.params = priv->channels.params; - - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); - mlx5e_set_rq_type(mdev, &new_channels.params); + new_params = priv->channels.params; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); + mlx5e_set_rq_type(mdev, &new_params); - return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) @@ -1961,23 +1982,16 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; - int err; + struct mlx5e_params new_params; if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)) return -EOPNOTSUPP; - new_channels.params = priv->channels.params; - - MLX5E_SET_PFLAG(&new_channels.params, flag, enable); + new_params = priv->channels.params; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + MLX5E_SET_PFLAG(&new_params, flag, enable); - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); - return err; + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_xdp_tx_mpwqe(struct 
net_device *netdev, bool enable) @@ -1994,7 +2008,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; int err; if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) @@ -2010,29 +2024,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) return -EINVAL; } - new_channels.params = priv->channels.params; - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable); + new_params = priv->channels.params; + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable); /* No need to verify SQ stop room as * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both * has the same log_sq_size. */ - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_params old_params; - - old_params = priv->channels.params; - priv->channels.params = new_channels.params; - err = mlx5e_num_channels_changed(priv); - if (err) - priv->channels.params = old_params; - goto out; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_num_channels_changed_ctx, NULL); -out: + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); if (!err) - priv->port_ptp_opened = true; + priv->tx_ptp_opened = true; return err; } @@ -2123,12 +2125,216 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return mlx5e_ethtool_set_rxnfc(dev, cmd); } +static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode) +{ + struct mlx5_ifc_pddr_troubleshooting_page_bits *pddr_troubleshooting_page; + u32 in[MLX5_ST_SZ_DW(pddr_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(pddr_reg)]; + int err; + + MLX5_SET(pddr_reg, in, local_port, 1); + MLX5_SET(pddr_reg, in, page_select, + MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE); + + pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, in, page_data); + MLX5_SET(pddr_troubleshooting_page, pddr_troubleshooting_page, + group_opcode, MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR); + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PDDR, 0, 0); + if (err) + return err; + + pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, out, page_data); + *status_opcode = MLX5_GET(pddr_troubleshooting_page, pddr_troubleshooting_page, + status_opcode); + return 0; +} + +struct mlx5e_ethtool_link_ext_state_opcode_mapping { + u32 status_opcode; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct mlx5e_ethtool_link_ext_state_opcode_mapping +mlx5e_link_ext_state_opcode_map[] = { + /* States relating to the autonegotiation or issues therein */ + {2, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED}, + {3, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + {4, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED}, + {36, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE}, + {38, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE}, + {39, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + + /* Failure during link training */ + {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED}, + {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + 
{7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0}, + {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + /* Logical mismatch in physical coding sublayer or forward error correction sublayer */ + {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS}, + {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + /* Signal integrity issues */ + {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0}, + {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE}, + + /* No cable connected */ + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + + /* Failure is related to cable, e.g., unsupported cable */ + {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0}, + + /* Failure is related to EEPROM, e.g., failure during reading or parsing the data */ + {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, + + /* Failure during calibration algorithm */ + {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0}, + + /* The hardware is not able to provide the power required from cable or module */ + {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, + + /* The module is overheated */ + {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, +}; + +static void +mlx5e_set_link_ext_state(struct mlx5e_ethtool_link_ext_state_opcode_mapping + link_ext_state_mapping, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + switch (link_ext_state_mapping.link_ext_state) { + case ETHTOOL_LINK_EXT_STATE_AUTONEG: + link_ext_state_info->autoneg = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: + link_ext_state_info->link_training = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: + link_ext_state_info->link_logical_mismatch = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY: + link_ext_state_info->bad_signal_integrity = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE: + link_ext_state_info->cable_issue = + link_ext_state_mapping.link_ext_substate; + break; + default: + break; + } + + link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state; +} + +static int +mlx5e_get_link_ext_state(struct net_device *dev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + 
struct mlx5e_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping; + struct mlx5e_priv *priv = netdev_priv(dev); + u32 status_opcode = 0; + int i; + + /* Exit without data if the interface state is OK, since no extended data is + * available in such case + */ + if (netif_carrier_ok(dev)) + return -ENODATA; + + if (query_port_status_opcode(priv->mdev, &status_opcode) || + !status_opcode) + return -ENODATA; + + for (i = 0; i < ARRAY_SIZE(mlx5e_link_ext_state_opcode_map); i++) { + link_ext_state_mapping = mlx5e_link_ext_state_opcode_map[i]; + if (link_ext_state_mapping.status_opcode == status_opcode) { + mlx5e_set_link_ext_state(link_ext_state_mapping, + link_ext_state_info); + return 0; + } + } + + return -ENODATA; +} + +static void mlx5e_get_eth_phy_stats(struct net_device *netdev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_eth_phy_get(priv, phy_stats); +} + +static void mlx5e_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_eth_mac_get(priv, mac_stats); +} + +static void mlx5e_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_eth_ctrl_get(priv, ctrl_stats); +} + +static void mlx5e_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_rmon_get(priv, rmon_stats, ranges); +} + const struct ethtool_ops mlx5e_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ext_state = mlx5e_get_link_ext_state, .get_strings = mlx5e_get_strings, .get_sset_count = mlx5e_get_sset_count, .get_ethtool_stats = mlx5e_get_ethtool_stats, @@ -2157,12 +2363,18 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .get_module_eeprom_by_page = mlx5e_get_module_eeprom_by_page, .flash_device = mlx5e_flash_device, .get_priv_flags = mlx5e_get_priv_flags, .set_priv_flags = mlx5e_set_priv_flags, .self_test = mlx5e_self_test, .get_msglevel = mlx5e_get_msglevel, .set_msglevel = mlx5e_set_msglevel, + .get_fec_stats = mlx5e_get_fec_stats, .get_fecparam = mlx5e_get_fecparam, .set_fecparam = mlx5e_set_fecparam, + .get_eth_phy_stats = mlx5e_get_eth_phy_stats, + .get_eth_mac_stats = mlx5e_get_eth_mac_stats, + .get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats, + .get_rmon_stats = mlx5e_get_rmon_stats, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 16ce7756ac43..0d571a0c76d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -36,7 +36,9 @@ #include <linux/tcp.h> #include <linux/mlx5/fs.h> #include "en.h" +#include "en_rep.h" #include "lib/mpfs.h" +#include "en/ptp.h" static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type); @@ -106,6 +108,29 @@ static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn) kfree(hn); } +struct mlx5e_vlan_table { + struct mlx5e_flow_table ft; + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); + 
struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *untagged_rule; + struct mlx5_flow_handle *any_cvlan_rule; + struct mlx5_flow_handle *any_svlan_rule; + struct mlx5_flow_handle *trap_rule; + bool cvlan_filter_disabled; +}; + +unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan) +{ + return vlan->active_svlans; +} + +struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan) +{ + return vlan->ft.t; +} + static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) { struct net_device *ndev = priv->netdev; @@ -117,7 +142,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) int i; list_size = 0; - for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) + for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) list_size++; max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); @@ -134,7 +159,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) return -ENOMEM; i = 0; - for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) { if (i >= list_size) break; vlans[i++] = vlan; @@ -161,7 +186,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid, struct mlx5_flow_spec *spec) { - struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; + struct mlx5_flow_table *ft = priv->fs.vlan->ft.t; struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle **rule_p; MLX5_DECLARE_FLOW_ACT(flow_act); @@ -178,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, * disabled in match value means both S & C tags * don't exist (untagged of both) */ - rule_p = &priv->fs.vlan.untagged_rule; + rule_p = &priv->fs.vlan->untagged_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); break; case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: - rule_p = &priv->fs.vlan.any_cvlan_rule; + rule_p = &priv->fs.vlan->any_cvlan_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); break; case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: - rule_p = &priv->fs.vlan.any_svlan_rule; + rule_p = &priv->fs.vlan->any_svlan_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.svlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: - rule_p = &priv->fs.vlan.active_svlans_rule[vid]; + rule_p = &priv->fs.vlan->active_svlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.svlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); @@ -205,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, vid); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ - rule_p = &priv->fs.vlan.active_cvlans_rule[vid]; + rule_p = &priv->fs.vlan->active_cvlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); @@ -255,33 +280,33 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, { switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - if (priv->fs.vlan.untagged_rule) { - mlx5_del_flow_rules(priv->fs.vlan.untagged_rule); - priv->fs.vlan.untagged_rule = NULL; + if (priv->fs.vlan->untagged_rule) { + 
mlx5_del_flow_rules(priv->fs.vlan->untagged_rule); + priv->fs.vlan->untagged_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: - if (priv->fs.vlan.any_cvlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); - priv->fs.vlan.any_cvlan_rule = NULL; + if (priv->fs.vlan->any_cvlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule); + priv->fs.vlan->any_cvlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: - if (priv->fs.vlan.any_svlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); - priv->fs.vlan.any_svlan_rule = NULL; + if (priv->fs.vlan->any_svlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule); + priv->fs.vlan->any_svlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: - if (priv->fs.vlan.active_svlans_rule[vid]) { - mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]); - priv->fs.vlan.active_svlans_rule[vid] = NULL; + if (priv->fs.vlan->active_svlans_rule[vid]) { + mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]); + priv->fs.vlan->active_svlans_rule[vid] = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: - if (priv->fs.vlan.active_cvlans_rule[vid]) { - mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); - priv->fs.vlan.active_cvlans_rule[vid] = NULL; + if (priv->fs.vlan->active_cvlans_rule[vid]) { + mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]); + priv->fs.vlan->active_cvlans_rule[vid] = NULL; } mlx5e_vport_context_update_vlans(priv); break; @@ -328,27 +353,27 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num) int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) { - struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; + struct mlx5_flow_table *ft = priv->fs.vlan->ft.t; struct mlx5_flow_handle *rule; int err; rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); if (IS_ERR(rule)) { err = PTR_ERR(rule); - priv->fs.vlan.trap_rule = NULL; + priv->fs.vlan->trap_rule = NULL; netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n", __func__, err); return err; } - priv->fs.vlan.trap_rule = rule; + priv->fs.vlan->trap_rule = rule; return 0; } void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv) { - if (priv->fs.vlan.trap_rule) { - mlx5_del_flow_rules(priv->fs.vlan.trap_rule); - priv->fs.vlan.trap_rule = NULL; + if (priv->fs.vlan->trap_rule) { + mlx5_del_flow_rules(priv->fs.vlan->trap_rule); + priv->fs.vlan->trap_rule = NULL; } } @@ -380,10 +405,10 @@ void mlx5e_remove_mac_trap(struct mlx5e_priv *priv) void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) { - if (!priv->fs.vlan.cvlan_filter_disabled) + if (!priv->fs.vlan->cvlan_filter_disabled) return; - priv->fs.vlan.cvlan_filter_disabled = false; + priv->fs.vlan->cvlan_filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); @@ -391,10 +416,10 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) { - if (priv->fs.vlan.cvlan_filter_disabled) + if (priv->fs.vlan->cvlan_filter_disabled) return; - priv->fs.vlan.cvlan_filter_disabled = true; + priv->fs.vlan->cvlan_filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); @@ -404,11 +429,11 @@ static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid) { int err; - set_bit(vid, priv->fs.vlan.active_cvlans); + set_bit(vid, priv->fs.vlan->active_cvlans); err = mlx5e_add_vlan_rule(priv, 
MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); if (err) - clear_bit(vid, priv->fs.vlan.active_cvlans); + clear_bit(vid, priv->fs.vlan->active_cvlans); return err; } @@ -418,11 +443,11 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) struct net_device *netdev = priv->netdev; int err; - set_bit(vid, priv->fs.vlan.active_svlans); + set_bit(vid, priv->fs.vlan->active_svlans); err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); if (err) { - clear_bit(vid, priv->fs.vlan.active_svlans); + clear_bit(vid, priv->fs.vlan->active_svlans); return err; } @@ -435,6 +460,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); + if (mlx5e_is_uplink_rep(priv)) + return 0; /* no vlan table for uplink rep */ + if (be16_to_cpu(proto) == ETH_P_8021Q) return mlx5e_vlan_rx_add_cvid(priv, vid); else if (be16_to_cpu(proto) == ETH_P_8021AD) @@ -447,11 +475,14 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); + if (mlx5e_is_uplink_rep(priv)) + return 0; /* no vlan table for uplink rep */ + if (be16_to_cpu(proto) == ETH_P_8021Q) { - clear_bit(vid, priv->fs.vlan.active_cvlans); + clear_bit(vid, priv->fs.vlan->active_cvlans); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); } else if (be16_to_cpu(proto) == ETH_P_8021AD) { - clear_bit(vid, priv->fs.vlan.active_svlans); + clear_bit(vid, priv->fs.vlan->active_svlans); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); netdev_update_features(dev); } @@ -465,14 +496,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) { mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) + for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled) + if (priv->fs.vlan->cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } @@ -482,11 +513,11 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) { mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) + for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); @@ -496,7 +527,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) /* must be called after DESTROY bit is set and * set_rx_mode is called and flushed */ - if (priv->fs.vlan.cvlan_filter_disabled) + if (priv->fs.vlan->cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); } @@ -1684,10 +1715,15 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) { - struct mlx5e_flow_table *ft = &priv->fs.vlan.ft; struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5e_flow_table *ft; int err; + priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL); + if (!priv->fs.vlan) + return -ENOMEM; + + ft = 
&priv->fs.vlan->ft; ft->num_groups = 0; ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE; @@ -1695,12 +1731,11 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) ft_attr.prio = MLX5E_NIC_PRIO; ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); - if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); - ft->t = NULL; - return err; + goto err_free_t; } + ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); if (!ft->g) { err = -ENOMEM; @@ -1719,7 +1754,9 @@ err_free_g: kfree(ft->g); err_destroy_vlan_table: mlx5_destroy_flow_table(ft->t); - ft->t = NULL; +err_free_t: + kvfree(priv->fs.vlan); + priv->fs.vlan = NULL; return err; } @@ -1727,7 +1764,8 @@ err_destroy_vlan_table: static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) { mlx5e_del_vlan_rules(priv); - mlx5e_destroy_flow_table(&priv->fs.vlan.ft); + mlx5e_destroy_flow_table(&priv->fs.vlan->ft); + kvfree(priv->fs.vlan); } int mlx5e_create_flow_steering(struct mlx5e_priv *priv) @@ -1785,10 +1823,16 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) goto err_destroy_l2_table; } + err = mlx5e_ptp_alloc_rx_fs(priv); + if (err) + goto err_destory_vlan_table; + mlx5e_ethtool_init_steering(priv); return 0; +err_destory_vlan_table: + mlx5e_destroy_vlan_table(priv); err_destroy_l2_table: mlx5e_destroy_l2_table(priv); err_destroy_ttc_table: @@ -1803,6 +1847,7 @@ err_destroy_arfs_tables: void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) { + mlx5e_ptp_free_rx_fs(priv); mlx5e_destroy_vlan_table(priv); mlx5e_destroy_l2_table(priv); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5db63b9f3b70..bca832cdc4cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -87,51 +87,6 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) return true; } -void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - params->log_rq_mtu_frames = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - - mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", - params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, - params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? - BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) : - BIT(params->log_rq_mtu_frames), - BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)), - MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); -} - -bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) - return false; - - if (mlx5_fpga_is_ipsec_device(mdev)) - return false; - - if (params->xdp_prog) { - /* XSK params are not considered here. If striding RQ is in use, - * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will - * be called with the known XSK params. - */ - if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) - return false; - } - - return true; -} - -void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) -{ - params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && - MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? 
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_CYCLIC; -} - void mlx5e_update_carrier(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -259,18 +214,17 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } -static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, - struct mlx5e_channel *c) +static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); rq->mpwqe.info = kvzalloc_node(array_size(wq_sz, sizeof(*rq->mpwqe.info)), - GFP_KERNEL, cpu_to_node(c->cpu)); + GFP_KERNEL, node); if (!rq->mpwqe.info) return -ENOMEM; - mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe); + mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe); return 0; } @@ -302,7 +256,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET64(mkc, mkc, len, npages << page_shift); MLX5_SET(mkc, mkc, translations_octword_size, MLX5_MTT_OCTW(npages)); @@ -419,58 +373,53 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) __free_page(rq->wqe_overflow.page); } -static int mlx5e_alloc_rq(struct mlx5e_channel *c, - struct mlx5e_params *params, +static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, + struct mlx5e_rq *rq) +{ + struct mlx5_core_dev *mdev = c->mdev; + int err; + + rq->wq_type = params->rq_wq_type; + rq->pdev = c->pdev; + rq->netdev = c->netdev; + rq->priv = c->priv; + rq->tstamp = c->tstamp; + rq->clock = &mdev->clock; + rq->icosq = &c->icosq; + rq->ix = c->ix; + rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->xdpsq = &c->rq_xdpsq; + rq->stats = &c->priv->channel_stats[c->ix].rq; + rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); + err = mlx5e_rq_set_handlers(rq, params, NULL); + if (err) + return err; + + return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); +} + +static int mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, - struct xsk_buff_pool *xsk_pool, struct mlx5e_rq_param *rqp, - struct mlx5e_rq *rq) + int node, struct mlx5e_rq *rq) { struct page_pool_params pp_params = { 0 }; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); - u32 rq_xdp_ix; u32 pool_size; int wq_sz; int err; int i; - rqp->wq.db_numa_node = cpu_to_node(c->cpu); - - rq->wq_type = params->rq_wq_type; - rq->pdev = c->pdev; - rq->netdev = c->netdev; - rq->priv = c->priv; - rq->tstamp = c->tstamp; - rq->clock = &mdev->clock; - rq->icosq = &c->icosq; - rq->ix = c->ix; - rq->mdev = mdev; - rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - rq->xdpsq = &c->rq_xdpsq; - rq->xsk_pool = xsk_pool; - rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ? 
- mlx5_real_time_cyc2time : - mlx5_timecounter_cyc2time; - - if (rq->xsk_pool) - rq->stats = &c->priv->channel_stats[c->ix].xskrq; - else - rq->stats = &c->priv->channel_stats[c->ix].rq; + rqp->wq.db_numa_node = node; INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work); if (params->xdp_prog) bpf_prog_inc(params->xdp_prog); RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog); - rq_xdp_ix = rq->ix; - if (xsk) - rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK; - err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0); - if (err < 0) - goto err_rq_xdp_prog; - rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk); pool_size = 1 << params->log_rq_mtu_frames; @@ -480,7 +429,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - goto err_rq_xdp; + goto err_rq_xdp_prog; err = mlx5e_alloc_mpwqe_rq_drop_page(rq); if (err) @@ -504,7 +453,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_drop_page; rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); - err = mlx5e_rq_alloc_mpwqe_info(rq, c); + err = mlx5e_rq_alloc_mpwqe_info(rq, node); if (err) goto err_rq_mkey; break; @@ -512,7 +461,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - goto err_rq_xdp; + goto err_rq_xdp_prog; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -524,23 +473,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags), (wq_sz << rq->wqe.info.log_num_frags)), - GFP_KERNEL, cpu_to_node(c->cpu)); + GFP_KERNEL, node); if (!rq->wqe.frags) { err = -ENOMEM; goto err_rq_wq_destroy; } - err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu)); + err = mlx5e_init_di_list(rq, wq_sz, node); if (err) goto err_rq_frags; - rq->mkey_be = c->mkey_be; + rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key); } - err = mlx5e_rq_set_handlers(rq, params, xsk); - if (err) - goto err_free_by_rq_type; - if (xsk) { err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); @@ -550,8 +495,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, pp_params.order = 0; pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ pp_params.pool_size = pool_size; - pp_params.nid = cpu_to_node(c->cpu); - pp_params.dev = c->pdev; + pp_params.nid = node; + pp_params.dev = rq->pdev; pp_params.dma_dir = rq->buff.map_dir; /* page_pool can be used even when there is no rq->xdp_prog, @@ -565,8 +510,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->page_pool = NULL; goto err_free_by_rq_type; } - err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, - MEM_TYPE_PAGE_POOL, rq->page_pool); + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_POOL, rq->page_pool); } if (err) goto err_free_by_rq_type; @@ -635,8 +581,6 @@ err_rq_frags: } err_rq_wq_destroy: mlx5_wq_destroy(&rq->wq_ctrl); -err_rq_xdp: - xdp_rxq_info_unreg(&rq->xdp_rxq); err_rq_xdp_prog: if (params->xdp_prog) bpf_prog_put(params->xdp_prog); @@ -649,10 +593,12 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) struct bpf_prog *old_prog; int i; - old_prog = rcu_dereference_protected(rq->xdp_prog, - lockdep_is_held(&rq->priv->state_lock)); - if (old_prog) - bpf_prog_put(old_prog); + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) { + old_prog = rcu_dereference_protected(rq->xdp_prog, + 
lockdep_is_held(&rq->priv->state_lock)); + if (old_prog) + bpf_prog_put(old_prog); + } switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -888,13 +834,14 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) } -int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, - struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk, - struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq) +int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, + struct mlx5e_xsk_param *xsk, int node, + struct mlx5e_rq *rq) { + struct mlx5_core_dev *mdev = rq->mdev; int err; - err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq); + err = mlx5e_alloc_rq(params, xsk, param, node, rq); if (err) return err; @@ -906,28 +853,28 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, if (err) goto err_destroy_rq; - if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev)) - __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */ + if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev)) + __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */ - if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full)) - __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state); + if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) + __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); if (params->rx_dim_enabled) - __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + __set_bit(MLX5E_RQ_STATE_AM, &rq->state); /* We disable csum_complete when XDP is enabled since * XDP programs might manipulate packets which will render * skb->checksum incorrect. */ - if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) - __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state); /* For CQE compression on striding RQ, use stride index provided by * HW if capability is supported. 
*/ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) && - MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index)) - __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state); + MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) + __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state); return 0; @@ -942,7 +889,10 @@ err_free_rq: void mlx5e_activate_rq(struct mlx5e_rq *rq) { set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - mlx5e_trigger_irq(rq->icosq); + if (rq->icosq) + mlx5e_trigger_irq(rq->icosq); + else + napi_schedule(rq->cq.napi); } void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -954,7 +904,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { cancel_work_sync(&rq->dim.work); - cancel_work_sync(&rq->icosq->recover_work); + if (rq->icosq) + cancel_work_sync(&rq->icosq->recover_work); cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); @@ -1019,7 +970,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->pdev = c->pdev; sq->mkey_be = c->mkey_be; sq->channel = c; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); sq->xsk_pool = xsk_pool; @@ -1090,7 +1041,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, int err; sq->channel = c; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->reserved_room = param->stop_room; param->wq.db_numa_node = cpu_to_node(c->cpu); @@ -1175,7 +1126,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->priv = c->priv; sq->ch_ix = c->ix; sq->txq_ix = txq_ix; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); @@ -1183,14 +1134,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); - if (mlx5_accel_is_tls_device(c->priv->mdev)) - set_bit(MLX5E_SQ_STATE_TLS, &sq->state); if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); sq->stop_room = param->stop_room; - sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ? 
- mlx5_real_time_cyc2time : - mlx5_timecounter_cyc2time; + sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev); param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1258,7 +1205,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); + MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index); MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); @@ -1462,8 +1409,17 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, if (err) goto err_free_icosq; + if (param->is_tls) { + sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list(); + if (IS_ERR(sq->ktls_resync)) { + err = PTR_ERR(sq->ktls_resync); + goto err_destroy_icosq; + } + } return 0; +err_destroy_icosq: + mlx5e_destroy_sq(c->mdev, sq->sqn); err_free_icosq: mlx5e_free_icosq(sq); @@ -1485,6 +1441,8 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq) { struct mlx5e_channel *c = sq->channel; + if (sq->ktls_resync) + mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync); mlx5e_destroy_sq(c->mdev, sq->sqn); mlx5e_free_icosq_descs(sq); mlx5e_free_icosq(sq); @@ -1861,14 +1819,16 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } -void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) +static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, + struct mlx5e_rq_param *rq_params) { - *ccp = (struct mlx5e_create_cq_param) { - .napi = &c->napi, - .ch_stats = c->stats, - .node = cpu_to_node(c->cpu), - .ix = c->ix, - }; + int err; + + err = mlx5e_init_rxq_rq(c, params, &c->rq); + if (err) + return err; + + return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq); } static int mlx5e_open_queues(struct mlx5e_channel *c, @@ -1931,7 +1891,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, goto err_close_sqs; } - err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq); + err = mlx5e_open_rxq_rq(c, params, &cparam->rq); if (err) goto err_close_xdp_sq; @@ -2033,7 +1993,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->cpu = cpu; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; @@ -2112,314 +2072,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) kvfree(c); } -#define DEFAULT_FRAG_SIZE (2048) - -static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk, - struct mlx5e_rq_frags_info *info) -{ - u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); - int frag_size_max = DEFAULT_FRAG_SIZE; - u32 buf_size = 0; - int i; - - if (mlx5_fpga_is_ipsec_device(mdev)) - byte_count += MLX5E_METADATA_ETHER_LEN; - - if (mlx5e_rx_is_linear_skb(params, xsk)) { - int frag_stride; - - frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk); - frag_stride = roundup_pow_of_two(frag_stride); - - info->arr[0].frag_size = byte_count; - info->arr[0].frag_stride = frag_stride; - info->num_frags = 1; - info->wqe_bulk = PAGE_SIZE / frag_stride; - goto out; - } - - if (byte_count > 
PAGE_SIZE + - (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max) - frag_size_max = PAGE_SIZE; - - i = 0; - while (buf_size < byte_count) { - int frag_size = byte_count - buf_size; - - if (i < MLX5E_MAX_RX_FRAGS - 1) - frag_size = min(frag_size, frag_size_max); - - info->arr[i].frag_size = frag_size; - info->arr[i].frag_stride = roundup_pow_of_two(frag_size); - - buf_size += frag_size; - i++; - } - info->num_frags = i; - /* number of different wqes sharing a page */ - info->wqe_bulk = 1 + (info->num_frags % 2); - -out: - info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); - info->log_num_frags = order_base_2(info->num_frags); -} - -static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) -{ - int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; - - switch (wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - sz += sizeof(struct mlx5e_rx_wqe_ll); - break; - default: /* MLX5_WQ_TYPE_CYCLIC */ - sz += sizeof(struct mlx5e_rx_wqe_cyc); - } - - return order_base_2(sz); -} - -static u8 mlx5e_get_rq_log_wq_sz(void *rqc) -{ - void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - - return MLX5_GET(wq, wq, log_wq_sz); -} - -void mlx5e_build_rq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk, - struct mlx5e_rq_param *param) -{ - struct mlx5_core_dev *mdev = priv->mdev; - void *rqc = param->rqc; - void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - int ndsegs = 1; - - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - MLX5_SET(wq, wq, log_wqe_num_of_strides, - mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) - - MLX5_MPWQE_LOG_NUM_STRIDES_BASE); - MLX5_SET(wq, wq, log_wqe_stride_size, - mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) - - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); - MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk)); - break; - default: /* MLX5_WQ_TYPE_CYCLIC */ - MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); - mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info); - ndsegs = param->frags_info.num_frags; - } - - MLX5_SET(wq, wq, wq_type, params->rq_wq_type); - MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); - MLX5_SET(wq, wq, log_wq_stride, - mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); - MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); - MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); - MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); - MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); - - param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); - mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp); -} - -static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, - struct mlx5e_rq_param *param) -{ - struct mlx5_core_dev *mdev = priv->mdev; - void *rqc = param->rqc; - void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, log_wq_stride, - mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); - MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); - - param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); -} - -void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, - struct mlx5e_sq_param *param) -{ - void *sqc = param->sqc; - void *wq = MLX5_ADDR_OF(sqc, sqc, wq); - - MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); - - param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev)); -} - -void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, - struct mlx5e_sq_param *param) -{ - void *sqc = param->sqc; - 
void *wq = MLX5_ADDR_OF(sqc, sqc, wq); - bool allow_swp; - - allow_swp = mlx5_geneve_tx_allowed(priv->mdev) || - !!MLX5_IPSEC_DEV(priv->mdev); - mlx5e_build_sq_param_common(priv, param); - MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); - MLX5_SET(sqc, sqc, allow_swp, allow_swp); - param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); - param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params); - mlx5e_build_tx_cq_param(priv, params, &param->cqp); -} - -static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, - struct mlx5e_cq_param *param) -{ - void *cqc = param->cqc; - - MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); - if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128) - MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); -} - -void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk, - struct mlx5e_cq_param *param) -{ - struct mlx5_core_dev *mdev = priv->mdev; - bool hw_stridx = false; - void *cqc = param->cqc; - u8 log_cq_size; - - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) + - mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); - hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index); - break; - default: /* MLX5_WQ_TYPE_CYCLIC */ - log_cq_size = params->log_rq_mtu_frames; - } - - MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); - if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { - MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ? - MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM); - MLX5_SET(cqc, cqc, cqe_comp_en, 1); - } - - mlx5e_build_common_cq_param(priv, param); - param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; -} - -void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_cq_param *param) -{ - void *cqc = param->cqc; - - MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size); - - mlx5e_build_common_cq_param(priv, param); - param->cq_period_mode = params->tx_cq_moderation.cq_period_mode; -} - -void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, - u8 log_wq_size, - struct mlx5e_cq_param *param) -{ - void *cqc = param->cqc; - - MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); - - mlx5e_build_common_cq_param(priv, param); - - param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; -} - -void mlx5e_build_icosq_param(struct mlx5e_priv *priv, - u8 log_wq_size, - struct mlx5e_sq_param *param) -{ - void *sqc = param->sqc; - void *wq = MLX5_ADDR_OF(sqc, sqc, wq); - - mlx5e_build_sq_param_common(priv, param); - - MLX5_SET(wq, wq, log_wq_sz, log_wq_size); - MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); - mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp); -} - -static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - u8 log_wq_size, - struct mlx5e_sq_param *param) -{ - void *sqc = param->sqc; - void *wq = MLX5_ADDR_OF(sqc, sqc, wq); - - mlx5e_build_sq_param_common(priv, param); - - /* async_icosq is used by XSK only if xdp_prog is active */ - if (params->xdp_prog) - param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */ - MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); - MLX5_SET(wq, wq, log_wq_sz, log_wq_size); - mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp); -} - -void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_sq_param *param) -{ - void *sqc = param->sqc; - void 
*wq = MLX5_ADDR_OF(sqc, sqc, wq); - - mlx5e_build_sq_param_common(priv, param); - MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); - param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); - mlx5e_build_tx_cq_param(priv, params, &param->cqp); -} - -static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, - struct mlx5e_rq_param *rqp) -{ - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, - order_base_2(MLX5E_UMR_WQEBBS) + - mlx5e_get_rq_log_wq_sz(rqp->rqc)); - default: /* MLX5_WQ_TYPE_CYCLIC */ - return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - } -} - -static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev) -{ - if (netdev->hw_features & NETIF_F_HW_TLS_RX) - return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; - - return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; -} - -static void mlx5e_build_channel_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_channel_param *cparam) -{ - u8 icosq_log_wq_sz, async_icosq_log_wq_sz; - - mlx5e_build_rq_param(priv, params, NULL, &cparam->rq); - - icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); - async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev); - - mlx5e_build_sq_param(priv, params, &cparam->txq_sq); - mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); - mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); - mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq); -} - int mlx5e_open_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs) { @@ -2434,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, if (!chs->c || !cparam) goto err_free; - mlx5e_build_channel_param(priv, &chs->params, cparam); + err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam); + if (err) + goto err_free; + for (i = 0; i < chs->num; i++) { struct xsk_buff_pool *xsk_pool = NULL; @@ -2446,9 +2101,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) { - err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port, - &chs->port_ptp); + if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) { + err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp); if (err) goto err_close_channels; } @@ -2462,8 +2116,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, return 0; err_close_ptp: - if (chs->port_ptp) - mlx5e_port_ptp_close(chs->port_ptp); + if (chs->ptp) + mlx5e_ptp_close(chs->ptp); err_close_channels: for (i--; i >= 0; i--) @@ -2483,8 +2137,8 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs) for (i = 0; i < chs->num; i++) mlx5e_activate_channel(chs->c[i]); - if (chs->port_ptp) - mlx5e_ptp_activate_channel(chs->port_ptp); + if (chs->ptp) + mlx5e_ptp_activate_channel(chs->ptp); } #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ @@ -2511,8 +2165,8 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) { int i; - if (chs->port_ptp) - mlx5e_ptp_deactivate_channel(chs->port_ptp); + if (chs->ptp) + mlx5e_ptp_deactivate_channel(chs->ptp); for (i = 0; i < chs->num; i++) mlx5e_deactivate_channel(chs->c[i]); @@ -2522,11 +2176,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; - if (chs->port_ptp) { - mlx5e_port_ptp_close(chs->port_ptp); - chs->port_ptp = NULL; + if (chs->ptp) { + mlx5e_ptp_close(chs->ptp); + chs->ptp = NULL; } - for (i = 0; i < chs->num; i++) 
mlx5e_close_channel(chs->c[i]); @@ -2582,12 +2235,12 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) return err; } -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) { int err; int ix; - for (ix = 0; ix < priv->max_nch; ix++) { + for (ix = 0; ix < n; ix++) { err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); if (unlikely(err)) goto err_destroy_rqts; @@ -2603,11 +2256,11 @@ err_destroy_rqts: return err; } -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) +void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) { int i; - for (i = 0; i < priv->max_nch; i++) + for (i = 0; i < n; i++) mlx5e_destroy_rqt(priv, &tirs[i].rqt); } @@ -2690,7 +2343,8 @@ static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix, } static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, - struct mlx5e_redirect_rqt_param rrp) + struct mlx5e_redirect_rqt_param rrp, + struct mlx5e_redirect_rqt_param *ptp_rrp) { u32 rqtn; int ix; @@ -2716,11 +2370,17 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, rqtn = priv->direct_tir[ix].rqt.rqtn; mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); } + if (ptp_rrp) { + rqtn = priv->ptp_tir.rqt.rqtn; + mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp); + } } static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs) { + bool rx_ptp_support = priv->profile->rx_ptp_support; + struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL; struct mlx5e_redirect_rqt_param rrp = { .is_rss = true, { @@ -2730,12 +2390,22 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, } }, }; + struct mlx5e_redirect_rqt_param ptp_rrp; + + if (rx_ptp_support) { + u32 ptp_rqn; - mlx5e_redirect_rqts(priv, rrp); + ptp_rrp.is_rss = false; + ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ? + priv->drop_rq.rqn : ptp_rqn; + ptp_rrp_p = &ptp_rrp; + } + mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p); } static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) { + bool rx_ptp_support = priv->profile->rx_ptp_support; struct mlx5e_redirect_rqt_param drop_rrp = { .is_rss = false, { @@ -2743,7 +2413,7 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) }, }; - mlx5e_redirect_rqts(priv, drop_rrp); + mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? 
&drop_rrp : NULL); } static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { @@ -3032,6 +2702,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) nch = priv->channels.params.num_channels; ntc = priv->channels.params.num_tc; num_rxqs = nch * priv->profile->rq_groups; + if (priv->channels.params.ptp_rx) + num_rxqs++; mlx5e_netdev_set_tcs(netdev, nch, ntc); @@ -3117,11 +2789,14 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) } } - if (!priv->channels.port_ptp) + if (!priv->channels.ptp) + return; + + if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state)) return; for (tc = 0; tc < num_tc; tc++) { - struct mlx5e_port_ptp *c = priv->channels.port_ptp; + struct mlx5e_ptp *c = priv->channels.ptp; struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; priv->txq2sq[sq->txq_ix] = sq; @@ -3172,6 +2847,29 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) mlx5e_deactivate_channels(&priv->channels); } +static int mlx5e_switch_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params, + mlx5e_fp_preactivate preactivate, + void *context) +{ + struct mlx5e_params old_params; + + old_params = priv->channels.params; + priv->channels.params = *new_params; + + if (preactivate) { + int err; + + err = preactivate(priv, context); + if (err) { + priv->channels.params = old_params; + return err; + } + } + + return 0; +} + static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, mlx5e_fp_preactivate preactivate, @@ -3214,35 +2912,32 @@ out: return err; } -int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_preactivate preactivate, - void *context) +int mlx5e_safe_switch_params(struct mlx5e_priv *priv, + struct mlx5e_params *params, + mlx5e_fp_preactivate preactivate, + void *context, bool reset) { + struct mlx5e_channels new_chs = {}; int err; - err = mlx5e_open_channels(priv, new_chs); + reset &= test_bit(MLX5E_STATE_OPENED, &priv->state); + if (!reset) + return mlx5e_switch_priv_params(priv, params, preactivate, context); + + new_chs.params = *params; + err = mlx5e_open_channels(priv, &new_chs); if (err) return err; - - err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); + err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context); if (err) - goto err_close; - - return 0; - -err_close: - mlx5e_close_channels(new_chs); + mlx5e_close_channels(&new_chs); return err; } int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) { - struct mlx5e_channels new_channels = {}; - - new_channels.params = priv->channels.params; - return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true); } void mlx5e_timestamp_init(struct mlx5e_priv *priv) @@ -3395,7 +3090,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, struct mlx5e_cq *cq = &drop_rq->cq; int err; - mlx5e_build_drop_rq_param(priv, &rq_param); + mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param); err = mlx5e_alloc_drop_cq(priv, cq, &cq_param); if (err) @@ -3443,10 +3138,10 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) { void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); if (MLX5_GET(tisc, tisc, tls_en)) - MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn); + MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn); if 
(mlx5_lag_is_lacp_owner(mdev)) MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); @@ -3516,7 +3211,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) { - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, rqtn); MLX5_SET(tirc, tirc, tunneled_offload_en, @@ -3608,7 +3303,7 @@ err_destroy_inner_tirs: return err; } -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) { struct mlx5e_tir *tir; void *tirc; @@ -3622,7 +3317,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) if (!in) return -ENOMEM; - for (ix = 0; ix < priv->max_nch; ix++) { + for (ix = 0; ix < n; ix++) { memset(in, 0, inlen); tir = &tirs[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); @@ -3660,11 +3355,11 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); } -void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) { int i; - for (i = 0; i < priv->max_nch; i++) + for (i = 0; i < n; i++) mlx5e_destroy_tir(priv->mdev, &tirs[i]); } @@ -3699,7 +3394,7 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, struct tc_mqprio_qopt *mqprio) { - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; u8 tc = mqprio->num_tc; int err = 0; @@ -3718,23 +3413,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, goto out; } - new_channels.params = priv->channels.params; - new_channels.params.num_tc = tc ? tc : 1; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_params old_params; + new_params = priv->channels.params; + new_params.num_tc = tc ? 
tc : 1; - old_params = priv->channels.params; - priv->channels.params = new_channels.params; - err = mlx5e_num_channels_changed(priv); - if (err) - priv->channels.params = old_params; - - goto out; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_num_channels_changed_ctx, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); out: priv->max_opened_tc = max_t(u8, priv->max_opened_tc, @@ -3791,8 +3474,16 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct mlx5e_priv *priv = netdev_priv(dev); + bool tc_unbind = false; int err; + if (type == TC_SETUP_BLOCK && + ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND) + tc_unbind = true; + + if (!netif_device_present(dev) && !tc_unbind) + return -ENODEV; + switch (type) { case TC_SETUP_BLOCK: { struct flow_block_offload *f = type_data; @@ -3837,15 +3528,22 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->tx_dropped += sq_stats->dropped; } } - if (priv->port_ptp_opened) { + if (priv->tx_ptp_opened) { for (i = 0; i < priv->max_opened_tc; i++) { - struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i]; + struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i]; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; s->tx_dropped += sq_stats->dropped; } } + if (priv->rx_ptp_opened) { + struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->multicast += rq_stats->mcast_packets; + } } void @@ -3854,6 +3552,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_pport_stats *pstats = &priv->stats.pport; + if (!netif_device_present(dev)) + return; + /* In switchdev mode, monitor counters doesn't monitor * rx/tx stats of 802_3. 
The update stats mechanism * should keep the 802_3 layout counters updated @@ -3895,11 +3596,19 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; } +static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv) +{ + if (mlx5e_is_uplink_rep(priv)) + return; /* no rx mode for uplink rep */ + + queue_work(priv->wq, &priv->set_rx_mode_work); +} + static void mlx5e_set_rx_mode(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); } static int mlx5e_set_mac(struct net_device *netdev, void *addr) @@ -3914,7 +3623,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) ether_addr_copy(netdev->dev_addr, saddr->sa_data); netif_addr_unlock_bh(netdev); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); return 0; } @@ -3933,10 +3642,10 @@ static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; struct mlx5e_params *cur_params; + struct mlx5e_params new_params; + bool reset = true; int err = 0; - bool reset; mutex_lock(&priv->state_lock); @@ -3954,30 +3663,17 @@ static int set_feature_lro(struct net_device *netdev, bool enable) goto out; } - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + new_params = *cur_params; + new_params.lro_en = enable; - new_channels.params = *cur_params; - new_channels.params.lro_en = enable; - - if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { + if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == - mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL)) + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL)) reset = false; } - if (!reset) { - struct mlx5e_params old_params; - - old_params = *cur_params; - *cur_params = new_channels.params; - err = mlx5e_modify_tirs_lro(priv); - if (err) - *cur_params = old_params; - goto out; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_modify_tirs_lro_ctx, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_modify_tirs_lro_ctx, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; @@ -4136,7 +3832,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, mutex_lock(&priv->state_lock); params = &priv->channels.params; - if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { + if (!priv->fs.vlan || + !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) { /* HW strips the outer C-tag header, this is a problem * for S-tag traffic. 
*/ @@ -4205,26 +3902,23 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, mlx5e_fp_preactivate preactivate) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; struct mlx5e_params *params; + bool reset = true; int err = 0; - bool reset; mutex_lock(&priv->state_lock); params = &priv->channels.params; - reset = !params->lro_en; - reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); - - new_channels.params = *params; - new_channels.params.sw_mtu = new_mtu; - err = mlx5e_validate_params(priv, &new_channels.params); + new_params = *params; + new_params.sw_mtu = new_mtu; + err = mlx5e_validate_params(priv->mdev, &new_params); if (err) goto out; if (params->xdp_prog && - !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { + !mlx5e_rx_is_linear_skb(&new_params, NULL)) { netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", new_mtu, mlx5e_xdp_max_mtu(params, NULL)); err = -EINVAL; @@ -4233,47 +3927,34 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (priv->xsk.refcnt && !mlx5e_xsk_validate_mtu(netdev, &priv->channels, - &new_channels.params, priv->mdev)) { + &new_params, priv->mdev)) { err = -EINVAL; goto out; } + if (params->lro_en) + reset = false; + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, - &new_channels.params, - NULL); + bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL); + bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, + &new_params, NULL); u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL); - u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL); + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL); - /* If XSK is active, XSK RQs are linear. */ - is_linear |= priv->xsk.refcnt; - - /* Always reset in linear mode - hw_mtu is used in data path. */ - reset = reset && (is_linear || (ppw_old != ppw_new)); - } - - if (!reset) { - unsigned int old_mtu = params->sw_mtu; - - params->sw_mtu = new_mtu; - if (preactivate) { - err = preactivate(priv, NULL); - if (err) { - params->sw_mtu = old_mtu; - goto out; - } - } - netdev->mtu = params->sw_mtu; - goto out; + /* Always reset in linear mode - hw_mtu is used in data path. + * Check that the mode was non-linear and didn't change. + * If XSK is active, XSK RQs are linear. 
+ */ + if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt && + ppw_old == ppw_new) + reset = false; } - err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL); - if (err) - goto out; - - netdev->mtu = new_channels.params.sw_mtu; + err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset); out: + netdev->mtu = params->sw_mtu; mutex_unlock(&priv->state_lock); return err; } @@ -4283,9 +3964,18 @@ static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); } +int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx) +{ + bool set = *(bool *)ctx; + + return mlx5e_ptp_rx_manage_fs(priv, set); +} + int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) { + struct mlx5e_params new_params; struct hwtstamp_config config; + bool rx_cqe_compress_def; int err; if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || @@ -4305,11 +3995,13 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) } mutex_lock(&priv->state_lock); + new_params = priv->channels.params; + rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def; + /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); + new_params.ptp_rx = false; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -4326,15 +4018,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - /* Disable CQE compression */ - if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) - netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); - if (err) { - netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); - mutex_unlock(&priv->state_lock); - return err; - } + new_params.ptp_rx = rx_cqe_compress_def; config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: @@ -4342,6 +4026,16 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) return -ERANGE; } + if (new_params.ptp_rx == priv->channels.params.ptp_rx) + goto out; + + err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx, + &new_params.ptp_rx, true); + if (err) { + mutex_unlock(&priv->state_lock); + return err; + } +out: memcpy(&priv->tstamp, &config, sizeof(config)); mutex_unlock(&priv->state_lock); @@ -4452,6 +4146,9 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + if (mlx5e_is_uplink_rep(priv)) + return -EOPNOTSUPP; + return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1, mlx5_ifla_link2vport(link_state)); } @@ -4463,6 +4160,9 @@ int mlx5e_get_vf_config(struct net_device *dev, struct mlx5_core_dev *mdev = priv->mdev; int err; + if (!netif_device_present(dev)) + return -EOPNOTSUPP; + err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi); if (err) return err; @@ -4479,6 +4179,32 @@ int mlx5e_get_vf_stats(struct net_device *dev, return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, vf_stats); } + +static bool +mlx5e_has_offload_stats(const struct net_device *dev, int attr_id) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (!netif_device_present(dev)) + return false; + + if 
(!mlx5e_is_uplink_rep(priv)) + return false; + + return mlx5e_rep_has_offload_stats(dev, attr_id); +} + +static int +mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (!mlx5e_is_uplink_rep(priv)) + return -EOPNOTSUPP; + + return mlx5e_rep_get_offload_stats(attr_id, dev, sp); +} #endif static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type) @@ -4622,7 +4348,7 @@ static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue) static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) { struct net_device *netdev = priv->netdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; if (priv->channels.params.lro_en) { netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); @@ -4635,16 +4361,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return -EINVAL; } - new_channels.params = priv->channels.params; - new_channels.params.xdp_prog = prog; + new_params = priv->channels.params; + new_params.xdp_prog = prog; /* No XSK params: AF_XDP can't be enabled yet at the point of setting * the XDP program. */ - if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { + if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) { netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", - new_channels.params.sw_mtu, - mlx5e_xdp_max_mtu(&new_channels.params, NULL)); + new_params.sw_mtu, + mlx5e_xdp_max_mtu(&new_params, NULL)); return -EINVAL; } @@ -4664,9 +4390,10 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params new_params; struct bpf_prog *old_prog; - bool reset, was_opened; int err = 0; + bool reset; int i; mutex_lock(&priv->state_lock); @@ -4677,46 +4404,29 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) goto unlock; } - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); /* no need for full reset when exchanging programs */ reset = (!priv->channels.params.xdp_prog || !prog); - if (was_opened && !reset) - /* num_channels is invariant here, so we can take the - * batched reference right upfront. - */ - bpf_prog_add(prog, priv->channels.num); - - if (was_opened && reset) { - struct mlx5e_channels new_channels = {}; - - new_channels.params = priv->channels.params; - new_channels.params.xdp_prog = prog; - mlx5e_set_rq_type(priv->mdev, &new_channels.params); - old_prog = priv->channels.params.xdp_prog; + new_params = priv->channels.params; + new_params.xdp_prog = prog; + if (reset) + mlx5e_set_rq_type(priv->mdev, &new_params); + old_prog = priv->channels.params.xdp_prog; - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); - if (err) - goto unlock; - } else { - /* exchange programs, extra prog reference we got from caller - * as long as we don't fail from this point onwards. 
- */ - old_prog = xchg(&priv->channels.params.xdp_prog, prog); - } + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); + if (err) + goto unlock; if (old_prog) bpf_prog_put(old_prog); - if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */ - mlx5e_set_rq_type(priv->mdev, &priv->channels.params); - - if (!was_opened || reset) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset) goto unlock; /* exchanging programs w/o reset, we update ref counts on behalf * of the channels RQs here. */ + bpf_prog_add(prog, priv->channels.num); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -4837,6 +4547,8 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, + .ndo_has_offload_stats = mlx5e_has_offload_stats, + .ndo_get_offload_stats = mlx5e_get_offload_stats, #endif .ndo_get_devlink_port = mlx5e_get_devlink_port, }; @@ -4850,93 +4562,6 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, indirection_rqt[i] = i % num_channels; } -static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) -{ - u32 link_speed = 0; - u32 pci_bw = 0; - - mlx5e_port_max_linkspeed(mdev, &link_speed); - pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); - mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", - link_speed, pci_bw); - -#define MLX5E_SLOW_PCI_RATIO (2) - - return link_speed && pci_bw && - link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; -} - -static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) -{ - struct dim_cq_moder moder; - - moder.cq_period_mode = cq_period_mode; - moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; - - return moder; -} - -static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) -{ - struct dim_cq_moder moder; - - moder.cq_period_mode = cq_period_mode; - moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - - return moder; -} - -static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) -{ - return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
- DIM_CQ_PERIOD_MODE_START_FROM_CQE : - DIM_CQ_PERIOD_MODE_START_FROM_EQE; -} - -void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) -{ - if (params->tx_dim_enabled) { - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); - - params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); - } else { - params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); - } -} - -void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) -{ - if (params->rx_dim_enabled) { - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); - - params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); - } else { - params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); - } -} - -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) -{ - mlx5e_reset_tx_moderation(params, cq_period_mode); - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, - params->tx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); -} - -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) -{ - mlx5e_reset_rx_moderation(params, cq_period_mode); - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, - params->rx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); -} - static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -4949,25 +4574,6 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); } -void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - /* Prefer Striding RQ, unless any of the following holds: - * - Striding RQ configuration is not possible/supported. - * - Slow PCI heuristic. - * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. - * - * No XSK params: checking the availability of striding RQ in general. 
- */ - if (!slow_pci_heuristic(mdev) && - mlx5e_striding_rq_possible(mdev, params) && - (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) || - !mlx5e_rx_is_linear_skb(params, NULL))) - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); - mlx5e_set_rq_type(mdev, params); - mlx5e_init_rq_type_params(mdev, params); -} - void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, u16 num_channels) { @@ -5283,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct devlink_port *dl_port; int err; mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu); @@ -5298,19 +4905,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); - err = mlx5e_devlink_port_register(priv); - if (err) - mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); - - mlx5e_health_create_reporters(priv); + dl_port = mlx5e_devlink_get_dl_port(priv); + if (dl_port->registered) + mlx5e_health_create_reporters(priv); return 0; } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { - mlx5e_health_destroy_reporters(priv); - mlx5e_devlink_port_unregister(priv); + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); + + if (dl_port->registered) + mlx5e_health_destroy_reporters(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); } @@ -5318,6 +4925,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + u16 max_nch = priv->max_nch; int err; mlx5e_create_q_counters(priv); @@ -5332,7 +4940,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir); + err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); if (err) goto err_destroy_indirect_rqts; @@ -5340,22 +4948,30 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_direct_rqts; - err = mlx5e_create_direct_tirs(priv, priv->direct_tir); + err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); if (err) goto err_destroy_indirect_tirs; - err = mlx5e_create_direct_rqts(priv, priv->xsk_tir); + err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch); if (unlikely(err)) goto err_destroy_direct_tirs; - err = mlx5e_create_direct_tirs(priv, priv->xsk_tir); + err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch); if (unlikely(err)) goto err_destroy_xsk_rqts; + err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1); + if (err) + goto err_destroy_xsk_tirs; + + err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1); + if (err) + goto err_destroy_ptp_rqt; + err = mlx5e_create_flow_steering(priv); if (err) { mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_xsk_tirs; + goto err_destroy_ptp_direct_tir; } err = mlx5e_tc_nic_init(priv); @@ -5376,16 +4992,20 @@ err_tc_nic_cleanup: mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: mlx5e_destroy_flow_steering(priv); +err_destroy_ptp_direct_tir: + mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1); +err_destroy_ptp_rqt: + mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1); err_destroy_xsk_tirs: - mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch); err_destroy_xsk_rqts: - mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch); err_destroy_direct_tirs: - 
mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); err_destroy_indirect_tirs: mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); err_close_drop_rq: @@ -5397,14 +5017,18 @@ err_destroy_q_counters: static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { + u16 max_nch = priv->max_nch; + mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); - mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1); + mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1); + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch); + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); @@ -5450,7 +5074,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) return; mlx5e_dcbnl_init_app(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); rtnl_lock(); if (netif_running(netdev)) @@ -5473,7 +5097,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) netif_device_detach(priv->netdev); rtnl_unlock(); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); mlx5e_hv_vhca_stats_destroy(priv); if (mlx5e_monitor_counter_supported(priv)) @@ -5512,6 +5136,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_nic_stats_grps, .stats_grps_num = mlx5e_nic_stats_grps_num, + .rx_ptp_support = true, }; /* mlx5e generic netdev management API (move to en_common.c) */ @@ -5746,6 +5371,11 @@ rollback: return err; } +void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv) +{ + mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL); +} + void mlx5e_destroy_netdev(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; @@ -5828,10 +5458,17 @@ static int mlx5e_probe(struct auxiliary_device *adev, priv->profile = profile; priv->ppriv = NULL; + + err = mlx5e_devlink_port_register(priv); + if (err) { + mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); + goto err_destroy_netdev; + } + err = profile->init(mdev, netdev); if (err) { mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err); - goto err_destroy_netdev; + goto err_devlink_cleanup; } err = mlx5e_resume(adev); @@ -5849,12 +5486,15 @@ static int mlx5e_probe(struct auxiliary_device *adev, mlx5e_devlink_port_type_eth_set(priv); mlx5e_dcbnl_init_app(priv); + mlx5_uplink_netdev_set(mdev, netdev); return 0; err_resume: mlx5e_suspend(adev, state); err_profile_cleanup: profile->cleanup(priv); +err_devlink_cleanup: + mlx5e_devlink_port_unregister(priv); err_destroy_netdev: mlx5e_destroy_netdev(priv); return err; @@ -5869,6 +5509,7 @@ static void mlx5e_remove(struct auxiliary_device *adev) unregister_netdev(priv->netdev); mlx5e_suspend(adev, state); priv->profile->cleanup(priv); + mlx5e_devlink_port_unregister(priv); mlx5e_destroy_netdev(priv); } @@ -5894,18 
+5535,18 @@ int mlx5e_init(void) mlx5e_ipsec_build_inverse_table(); mlx5e_build_ptys2ethtool_map(); - ret = mlx5e_rep_init(); + ret = auxiliary_driver_register(&mlx5e_driver); if (ret) return ret; - ret = auxiliary_driver_register(&mlx5e_driver); + ret = mlx5e_rep_init(); if (ret) - mlx5e_rep_cleanup(); + auxiliary_driver_unregister(&mlx5e_driver); return ret; } void mlx5e_cleanup(void) { - auxiliary_driver_unregister(&mlx5e_driver); mlx5e_rep_cleanup(); + auxiliary_driver_unregister(&mlx5e_driver); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 8d39bfee84a9..34eb1118670f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -40,17 +40,19 @@ #include "eswitch.h" #include "en.h" #include "en_rep.h" +#include "en/params.h" #include "en/txrx.h" #include "en_tc.h" #include "en/rep/tc.h" #include "en/rep/neigh.h" +#include "en/devlink.h" #include "fs_core.h" #include "lib/mlx5.h" #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ - max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) + max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; @@ -69,16 +71,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev, fw_rev_sub(mdev), mdev->board_id); } -static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - mlx5e_rep_get_drvinfo(dev, drvinfo); - strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev), - sizeof(drvinfo->bus_info)); -} - static const struct counter_desc sw_rep_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, @@ -285,46 +277,6 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev, - struct ethtool_pause_stats *stats) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - mlx5e_stats_pause_get(priv, stats); -} - -static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pauseparam) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - mlx5e_ethtool_get_pauseparam(priv, pauseparam); -} - -static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pauseparam) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - return mlx5e_ethtool_set_pauseparam(priv, pauseparam); -} - -static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_ksettings) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); -} - -static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_ksettings) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings); -} - static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | @@ -344,34 +296,6 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, }; -static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { - 
.supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES | - ETHTOOL_COALESCE_USE_ADAPTIVE, - .get_drvinfo = mlx5e_uplink_rep_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_strings = mlx5e_rep_get_strings, - .get_sset_count = mlx5e_rep_get_sset_count, - .get_ethtool_stats = mlx5e_rep_get_ethtool_stats, - .get_ringparam = mlx5e_rep_get_ringparam, - .set_ringparam = mlx5e_rep_set_ringparam, - .get_channels = mlx5e_rep_get_channels, - .set_channels = mlx5e_rep_set_channels, - .get_coalesce = mlx5e_rep_get_coalesce, - .set_coalesce = mlx5e_rep_set_coalesce, - .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings, - .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings, - .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, - .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, - .get_rxfh = mlx5e_get_rxfh, - .set_rxfh = mlx5e_set_rxfh, - .get_rxnfc = mlx5e_get_rxnfc, - .set_rxnfc = mlx5e_set_rxnfc, - .get_pause_stats = mlx5e_uplink_rep_get_pause_stats, - .get_pauseparam = mlx5e_uplink_rep_get_pauseparam, - .set_pauseparam = mlx5e_uplink_rep_set_pauseparam, -}; - static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { @@ -411,8 +335,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, } /* Add re-inject rule to the PF/representor sqs */ - flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, - rep->vport, + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep, sqns_array[i]); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -522,7 +445,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) return (rep->vport == MLX5_VPORT_UPLINK); } -static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) +bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) { switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: @@ -542,8 +465,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, return 0; } -static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, - void *sp) +int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) { switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: @@ -568,34 +491,6 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu) return mlx5e_change_mtu(netdev, new_mtu, NULL); } -static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu) -{ - return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); -} - -static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) -{ - struct sockaddr *saddr = addr; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - ether_addr_copy(netdev->dev_addr, saddr->sa_data); - return 0; -} - -static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, - __be16 vlan_proto) -{ - netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n"); - - if (vlan != 0) - return -EOPNOTSUPP; - - /* allow setting 0-vid for compatibility with libvirt */ - return 0; -} - static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -641,29 +536,10 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_change_carrier = mlx5e_rep_change_carrier, }; -static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { - .ndo_open = mlx5e_open, - .ndo_stop = mlx5e_close, - .ndo_start_xmit = mlx5e_xmit, - .ndo_set_mac_address = mlx5e_uplink_rep_set_mac, - .ndo_setup_tc = 
mlx5e_rep_setup_tc, - .ndo_get_devlink_port = mlx5e_rep_get_devlink_port, - .ndo_get_stats64 = mlx5e_get_stats, - .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, - .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, - .ndo_change_mtu = mlx5e_uplink_rep_change_mtu, - .ndo_features_check = mlx5e_features_check, - .ndo_set_vf_mac = mlx5e_set_vf_mac, - .ndo_set_vf_rate = mlx5e_set_vf_rate, - .ndo_get_vf_config = mlx5e_get_vf_config, - .ndo_get_vf_stats = mlx5e_get_vf_stats, - .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, - .ndo_set_features = mlx5e_set_features, -}; - bool mlx5e_eswitch_uplink_rep(struct net_device *netdev) { - return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; + return netdev->netdev_ops == &mlx5e_netdev_ops && + mlx5e_is_uplink_rep(netdev_priv(netdev)); } bool mlx5e_eswitch_vf_rep(struct net_device *netdev) @@ -713,26 +589,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev) } static void mlx5e_build_rep_netdev(struct net_device *netdev, - struct mlx5_core_dev *mdev, - struct mlx5_eswitch_rep *rep) + struct mlx5_core_dev *mdev) { SET_NETDEV_DEV(netdev, mdev->device); - if (rep->vport == MLX5_VPORT_UPLINK) { - netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; - /* we want a persistent mac for the uplink rep */ - mlx5_query_mac_address(mdev, netdev->dev_addr); - netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops; - mlx5e_dcbnl_build_rep_netdev(netdev); - } else { - netdev->netdev_ops = &mlx5e_netdev_ops_rep; - eth_hw_addr_random(netdev); - netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; - } + netdev->netdev_ops = &mlx5e_netdev_ops_rep; + eth_hw_addr_random(netdev); + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; netdev->watchdog_timeo = 15 * HZ; - netdev->features |= NETIF_F_NETNS_LOCAL; - #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; #endif @@ -744,12 +609,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; - if (rep->vport == MLX5_VPORT_UPLINK) - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - else - netdev->features |= NETIF_F_VLAN_CHALLENGED; - netdev->features |= netdev->hw_features; + netdev->features |= NETIF_F_VLAN_CHALLENGED; + netdev->features |= NETIF_F_NETNS_LOCAL; } static int mlx5e_init_rep(struct mlx5_core_dev *mdev, @@ -890,6 +752,7 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup) static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + u16 max_nch = priv->max_nch; int err; mlx5e_init_l2_addr(priv); @@ -904,7 +767,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir); + err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); if (err) goto err_destroy_indirect_rqts; @@ -912,7 +775,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_direct_rqts; - err = mlx5e_create_direct_tirs(priv, priv->direct_tir); + err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); if (err) goto err_destroy_indirect_tirs; @@ -937,11 +800,11 @@ err_destroy_root_ft: err_destroy_ttc_table: mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); err_destroy_indirect_tirs: mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + 
mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); err_close_drop_rq: @@ -951,13 +814,15 @@ err_close_drop_rq: static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { + u16 max_nch = priv->max_nch; + mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); } @@ -1116,6 +981,14 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5e_dcbnl_initialize(priv); mlx5e_dcbnl_init_app(priv); mlx5e_rep_neigh_init(rpriv); + + netdev->wanted_features |= NETIF_F_HW_TC; + + rtnl_lock(); + if (netif_running(netdev)) + mlx5e_open(netdev); + netif_device_attach(netdev); + rtnl_unlock(); } static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) @@ -1123,6 +996,12 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; + rtnl_lock(); + if (netif_running(priv->netdev)) + mlx5e_close(priv->netdev); + netif_device_detach(priv->netdev); + rtnl_unlock(); + mlx5e_rep_neigh_cleanup(rpriv); mlx5e_dcbnl_delete_app(priv); mlx5_notifier_unregister(mdev, &priv->events_nb); @@ -1183,6 +1062,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5e_rep_stats_grps, .stats_grps_num = mlx5e_rep_stats_grps_num, + .rx_ptp_support = false, }; static const struct mlx5e_profile mlx5e_uplink_rep_profile = { @@ -1199,33 +1079,65 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = MLX5E_MAX_NUM_TC, - .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), + /* XSK is needed so we can replace profile with NIC netdev */ + .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_ul_rep_stats_grps, .stats_grps_num = mlx5e_ul_rep_stats_grps_num, + .rx_ptp_support = false, }; /* e-Switch vport representors */ static int -mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) +mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev)); + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct devlink_port *dl_port; + int err; + + rpriv->netdev = priv->netdev; + + err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, + rpriv); + if (err) + return err; + + dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); + if (dl_port) + devlink_port_type_eth_set(dl_port, rpriv->netdev); + + return 0; +} + +static void +mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv) +{ + struct net_device *netdev = rpriv->netdev; + struct devlink_port *dl_port; + struct mlx5_core_dev *dev; + struct mlx5e_priv *priv; + + priv = netdev_priv(netdev); + dev = priv->mdev; + + dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); + if (dl_port) + devlink_port_type_clear(dl_port); + mlx5e_netdev_attach_nic_profile(priv); +} + +static int +mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, 
struct mlx5_eswitch_rep *rep) { + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); const struct mlx5e_profile *profile; - struct mlx5e_rep_priv *rpriv; struct devlink_port *dl_port; struct net_device *netdev; struct mlx5e_priv *priv; unsigned int txqs, rxqs; int nch, err; - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); - if (!rpriv) - return -ENOMEM; - - /* rpriv->rep to be looked up when profile->init() is called */ - rpriv->rep = rep; - - profile = (rep->vport == MLX5_VPORT_UPLINK) ? - &mlx5e_uplink_rep_profile : &mlx5e_rep_profile; - + profile = &mlx5e_rep_profile; nch = mlx5e_get_max_num_channels(dev); txqs = nch * profile->max_tc; rxqs = nch * profile->rq_groups; @@ -1234,21 +1146,11 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) mlx5_core_warn(dev, "Failed to create representor netdev for vport %d\n", rep->vport); - kfree(rpriv); return -EINVAL; } - mlx5e_build_rep_netdev(netdev, dev, rep); - + mlx5e_build_rep_netdev(netdev, dev); rpriv->netdev = netdev; - rep->rep_data[REP_ETH].priv = rpriv; - INIT_LIST_HEAD(&rpriv->vport_sqs_list); - - if (rep->vport == MLX5_VPORT_UPLINK) { - err = mlx5e_create_mdev_resources(dev); - if (err) - goto err_destroy_netdev; - } priv = netdev_priv(netdev); priv->profile = profile; @@ -1256,7 +1158,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) err = profile->init(dev, netdev); if (err) { netdev_warn(netdev, "rep profile init failed, %d\n", err); - goto err_destroy_mdev_resources; + goto err_destroy_netdev; } err = mlx5e_attach_netdev(netdev_priv(netdev)); @@ -1286,13 +1188,34 @@ err_detach_netdev: err_cleanup_profile: priv->profile->cleanup(priv); -err_destroy_mdev_resources: - if (rep->vport == MLX5_VPORT_UPLINK) - mlx5e_destroy_mdev_resources(dev); - err_destroy_netdev: mlx5e_destroy_netdev(netdev_priv(netdev)); - kfree(rpriv); + return err; +} + +static int +mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + int err; + + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) + return -ENOMEM; + + /* rpriv->rep to be looked up when profile->init() is called */ + rpriv->rep = rep; + rep->rep_data[REP_ETH].priv = rpriv; + INIT_LIST_HEAD(&rpriv->vport_sqs_list); + + if (rep->vport == MLX5_VPORT_UPLINK) + err = mlx5e_vport_uplink_rep_load(dev, rep); + else + err = mlx5e_vport_vf_rep_load(dev, rep); + + if (err) + kfree(rpriv); + return err; } @@ -1306,15 +1229,19 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) struct devlink_port *dl_port; void *ppriv = priv->ppriv; + if (rep->vport == MLX5_VPORT_UPLINK) { + mlx5e_vport_uplink_rep_unload(rpriv); + goto free_ppriv; + } + dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); if (dl_port) devlink_port_type_clear(dl_port); unregister_netdev(netdev); mlx5e_detach_netdev(priv); priv->profile->cleanup(priv); - if (rep->vport == MLX5_VPORT_UPLINK) - mlx5e_destroy_mdev_resources(priv->mdev); mlx5e_destroy_netdev(priv); +free_ppriv: kfree(ppriv); /* mlx5e_rep_priv */ } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index d1696404cca9..22585015c7a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -89,6 +89,7 @@ struct mlx5_rep_uplink_priv { struct mapping_ctx *tunnel_enc_opts_mapping; struct mlx5_tc_ct_priv *ct_priv; + struct mlx5_esw_psample *esw_psample; /* support eswitch vports bonding */ struct 
mlx5e_rep_bond *bond; @@ -220,6 +221,10 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, const struct net_device *lag_dev); int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup); +bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id); +int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp); + bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); @@ -240,6 +245,11 @@ static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} static inline int mlx5e_rep_init(void) { return 0; }; static inline void mlx5e_rep_cleanup(void) {}; +static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev, + int attr_id) { return false; } +static inline int mlx5e_rep_get_offload_stats(int attr_id, + const struct net_device *dev, + void *sp) { return -EOPNOTSUPP; } #endif static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 249d8905e644..f90894eea9e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -52,6 +52,7 @@ #include "en/health.h" #include "en/params.h" #include "devlink.h" +#include "en/devlink.h" static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, @@ -669,6 +670,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) get_cqe_opcode(cqe)); mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); + mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) queue_work(cq->priv->wq, &sq->recover_work); break; @@ -1822,6 +1824,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe struct mlx5e_priv *priv = netdev_priv(rq->netdev); struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; + struct devlink_port *dl_port; struct sk_buff *skb; u32 cqe_bcnt; u16 trap_id; @@ -1844,7 +1847,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); skb_push(skb, ETH_HLEN); - mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port); + dl_port = mlx5e_devlink_get_dl_port(priv); + mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port); dev_kfree_skb_any(skb); free_wqe: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 88a01c59ce61..e4f5b6395148 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -184,6 +184,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) }, #endif @@ -344,6 +345,7 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; s->rx_tls_resync_req_skip += 
rq_stats->tls_resync_req_skip; s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; + s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry; s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; s->rx_tls_err += rq_stats->tls_err; #endif @@ -401,13 +403,21 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv, { int i; - if (!priv->port_ptp_opened) + if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) return; - mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch); + mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch); - for (i = 0; i < priv->max_opened_tc; i++) { - mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]); + if (priv->tx_ptp_opened) { + for (i = 0; i < priv->max_opened_tc; i++) { + mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]); + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); + } + } + if (priv->rx_ptp_opened) { + mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq); /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ barrier(); @@ -760,35 +770,112 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } -#define MLX5E_READ_CTR64_BE_F(ptr, c) \ +#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \ be64_to_cpu(*(__be64 *)((char *)ptr + \ MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_802_3_cntrs_grp_data_layout.c##_high))) + counter_set.set.c##_high))) -void mlx5e_stats_pause_get(struct mlx5e_priv *priv, - struct ethtool_pause_stats *pause_stats) +static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev, + u32 *ppcnt_ieee_802_3) { - u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; - struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) - return; + return -EOPNOTSUPP; MLX5_SET(ppcnt_reg, in, local_port, 1); MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3, - sz, MLX5_REG_PPCNT, 0, 0); + return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3, + sz, MLX5_REG_PPCNT, 0, 0); +} + +void mlx5e_stats_pause_get(struct mlx5e_priv *priv, + struct ethtool_pause_stats *pause_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; pause_stats->tx_pause_frames = MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, a_pause_mac_ctrl_frames_transmitted); pause_stats->rx_pause_frames = MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, a_pause_mac_ctrl_frames_received); } +void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv, + struct ethtool_eth_phy_stats *phy_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; + + phy_stats->SymbolErrorDuringCarrier = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_symbol_error_during_carrier); +} + +void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv, + struct ethtool_eth_mac_stats *mac_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; + +#define RD(name) \ + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \ + eth_802_3_cntrs_grp_data_layout, \ + name) + + mac_stats->FramesTransmittedOK = 
RD(a_frames_transmitted_ok); + mac_stats->FramesReceivedOK = RD(a_frames_received_ok); + mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors); + mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok); + mac_stats->OctetsReceivedOK = RD(a_octets_received_ok); + mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok); + mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok); + mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok); + mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok); + mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors); + mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field); + mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors); +#undef RD +} + +void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; + + ctrl_stats->MACControlFramesTransmitted = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_mac_control_frames_transmitted); + ctrl_stats->MACControlFramesReceived = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_mac_control_frames_received); + ctrl_stats->UnsupportedOpcodesReceived = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_unsupported_opcodes_received); +} + #define PPORT_2863_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_2863_cntrs_grp_data_layout.c##_high) @@ -900,6 +987,59 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } +static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 8191 }, + { 8192, 10239 }, + {} +}; + +void mlx5e_stats_rmon_get(struct mlx5e_priv *priv, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges) +{ + u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters, + sz, MLX5_REG_PPCNT, 0, 0)) + return; + +#define RD(name) \ + MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \ + eth_2819_cntrs_grp_data_layout, \ + name) + + rmon->undersize_pkts = RD(ether_stats_undersize_pkts); + rmon->fragments = RD(ether_stats_fragments); + rmon->jabbers = RD(ether_stats_jabbers); + + rmon->hist[0] = RD(ether_stats_pkts64octets); + rmon->hist[1] = RD(ether_stats_pkts65to127octets); + rmon->hist[2] = RD(ether_stats_pkts128to255octets); + rmon->hist[3] = RD(ether_stats_pkts256to511octets); + rmon->hist[4] = RD(ether_stats_pkts512to1023octets); + rmon->hist[5] = RD(ether_stats_pkts1024to1518octets); + rmon->hist[6] = RD(ether_stats_pkts1519to2047octets); + rmon->hist[7] = RD(ether_stats_pkts2048to4095octets); + rmon->hist[8] = RD(ether_stats_pkts4096to8191octets); + rmon->hist[9] = RD(ether_stats_pkts8192to10239octets); +#undef RD + + *ranges = mlx5e_rmon_ranges; +} + #define PPORT_PHY_STATISTICAL_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.phys_layer_statistical_cntrs.c##_high) @@ 
-1007,6 +1147,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } +void mlx5e_stats_fec_get(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats) +{ + u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) + return; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical, + sz, MLX5_REG_PPCNT, 0, 0)) + return; + + fec_stats->corrected_bits.total = + MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical, + phys_layer_statistical_cntrs, + phy_corrected_bits); +} + #define PPORT_ETH_EXT_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_extended_cntrs_grp_data_layout.c##_high) @@ -1621,6 +1784,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) }, #endif @@ -1751,6 +1915,38 @@ static const struct counter_desc ptp_cq_stats_desc[] = { { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, }; +static const struct counter_desc ptp_rq_stats_desc[] = { + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) }, + { 
MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) }, +}; + static const struct counter_desc qos_sq_stats_desc[] = { { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) }, @@ -1795,6 +1991,7 @@ static const struct counter_desc qos_sq_stats_desc[] = { #define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc) #define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc) #define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc) +#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc) #define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos) @@ -1841,32 +2038,46 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp) { - return priv->port_ptp_opened ? - NUM_PTP_CH_STATS + - ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) : - 0; + int num = NUM_PTP_CH_STATS; + + if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) + return 0; + + if (priv->tx_ptp_opened) + num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc; + if (priv->rx_ptp_opened) + num += NUM_PTP_RQ_STATS; + + return num; } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp) { int i, tc; - if (!priv->port_ptp_opened) + if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) return idx; for (i = 0; i < NUM_PTP_CH_STATS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, ptp_ch_stats_desc[i].format); - for (tc = 0; tc < priv->max_opened_tc; tc++) - for (i = 0; i < NUM_PTP_SQ_STATS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - ptp_sq_stats_desc[i].format, tc); + if (priv->tx_ptp_opened) { + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_SQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_sq_stats_desc[i].format, tc); - for (tc = 0; tc < priv->max_opened_tc; tc++) - for (i = 0; i < NUM_PTP_CQ_STATS; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_CQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_cq_stats_desc[i].format, tc); + } + if (priv->rx_ptp_opened) { + for (i = 0; i < NUM_PTP_RQ_STATS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, - ptp_cq_stats_desc[i].format, tc); + ptp_rq_stats_desc[i].format); + } return idx; } @@ -1874,26 +2085,33 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp) { int i, tc; - if (!priv->port_ptp_opened) + if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) return idx; for (i = 0; i < NUM_PTP_CH_STATS; i++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch, + MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch, ptp_ch_stats_desc, i); - for (tc = 0; tc < priv->max_opened_tc; tc++) - for (i = 0; i < NUM_PTP_SQ_STATS; i++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc], - ptp_sq_stats_desc, i); + if (priv->tx_ptp_opened) { + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_SQ_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc], + ptp_sq_stats_desc, i); - for (tc = 0; tc < priv->max_opened_tc; tc++) - for (i = 0; i < NUM_PTP_CQ_STATS; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_CQ_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc], + ptp_cq_stats_desc, i); + } + if (priv->rx_ptp_opened) { + for (i = 0; i < NUM_PTP_RQ_STATS; i++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc], - 
ptp_cq_stats_desc, i); - + MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq, + ptp_rq_stats_desc, i); + } return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index adf9b7b8b712..139e59f30db0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -54,6 +54,7 @@ #define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld) @@ -113,6 +114,18 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv); void mlx5e_stats_pause_get(struct mlx5e_priv *priv, struct ethtool_pause_stats *pause_stats); +void mlx5e_stats_fec_get(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats); + +void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv, + struct ethtool_eth_phy_stats *phy_stats); +void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv, + struct ethtool_eth_mac_stats *mac_stats); +void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv, + struct ethtool_eth_ctrl_stats *ctrl_stats); +void mlx5e_stats_rmon_get(struct mlx5e_priv *priv, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges); /* Concrete NIC Stats */ @@ -206,6 +219,7 @@ struct mlx5e_sw_stats { u64 rx_tls_resync_req_end; u64 rx_tls_resync_req_skip; u64 rx_tls_resync_res_ok; + u64 rx_tls_resync_res_retry; u64 rx_tls_resync_res_skip; u64 rx_tls_err; #endif @@ -336,6 +350,7 @@ struct mlx5e_rq_stats { u64 tls_resync_req_end; u64 tls_resync_req_skip; u64 tls_resync_res_ok; + u64 tls_resync_res_retry; u64 tls_resync_res_skip; u64 tls_err; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d675107d9eca..47a9c49b25fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -47,6 +47,7 @@ #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_mpls.h> +#include <net/psample.h> #include <net/arp.h> #include <net/ipv6_stubs.h> #include <net/bareudp.h> @@ -65,6 +66,7 @@ #include "en/mod_hdr.h" #include "en/tc_priv.h" #include "en/tc_tun_encap.h" +#include "esw/sample.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -221,6 +223,25 @@ get_ct_priv(struct mlx5e_priv *priv) return priv->fs.tc.ct; } +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) +static struct mlx5_esw_psample * +get_sample_priv(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + + if (is_mdev_switchdev_mode(priv->mdev)) { + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + return uplink_priv->esw_psample; + } + + return NULL; +} +#endif + struct mlx5_flow_handle * mlx5_tc_rule_insert(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, @@ -445,11 +466,15 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); } -static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) +static int mlx5e_hairpin_fill_rqt_rqns(struct 
mlx5e_hairpin *hp, void *rqtc) { - u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn; struct mlx5e_priv *priv = hp->func_priv; int i, ix, sz = MLX5E_INDIR_RQT_SIZE; + u32 *indirection_rqt, rqn; + + indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL); + if (!indirection_rqt) + return -ENOMEM; mlx5e_build_default_indir_rqt(indirection_rqt, sz, hp->num_channels); @@ -462,6 +487,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) rqn = hp->pair->rqn[ix]; MLX5_SET(rqtc, rqtc, rq_num[i], rqn); } + + kfree(indirection_rqt); + return 0; } static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) @@ -482,12 +510,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); + err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); + if (err) + goto out; err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn); if (!err) hp->indir_rqt.enabled = true; +out: kvfree(in); return err; } @@ -896,7 +927,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, if (IS_ERR(dest[dest_ix].ft)) return ERR_CAST(dest[dest_ix].ft); } else { - dest[dest_ix].ft = priv->fs.vlan.ft.t; + dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan); } dest_ix++; } @@ -1077,19 +1108,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, if (flow_flag_test(flow, CT)) { mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; - return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), + rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), flow, spec, attr, mod_hdr_acts); +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + } else if (flow_flag_test(flow, SAMPLE)) { + rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr); +#endif + } else { + rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); } - rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); if (IS_ERR(rule)) return rule; if (attr->esw_attr->split_count) { flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr); if (IS_ERR(flow->rule[1])) { - mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + if (flow_flag_test(flow, CT)) + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); + else + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); return flow->rule[1]; } } @@ -1111,6 +1150,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, return; } +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + if (flow_flag_test(flow, SAMPLE)) { + mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); + return; + } +#endif + if (attr->esw_attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); @@ -1467,6 +1513,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); + kfree(flow->attr->esw_attr->sample); kfree(flow->attr); } @@ -1950,6 +1997,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_3); + void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_3); struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; @@ -1979,6 +2030,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_CT) | BIT(FLOW_DISSECTOR_KEY_ENC_IP) | 
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | + BIT(FLOW_DISSECTOR_KEY_ICMP) | BIT(FLOW_DISSECTOR_KEY_MPLS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", @@ -2298,7 +2350,49 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (match.mask->flags) *match_level = MLX5_MATCH_L4; } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_match_icmp match; + flow_rule_match_icmp(rule, &match); + switch (ip_proto) { + case IPPROTO_ICMP: + if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & + MLX5_FLEX_PROTO_ICMP)) + return -EOPNOTSUPP; + MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type, + match.mask->type); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type, + match.key->type); + MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code, + match.mask->code); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code, + match.key->code); + break; + case IPPROTO_ICMPV6: + if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & + MLX5_FLEX_PROTO_ICMPV6)) + return -EOPNOTSUPP; + MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type, + match.mask->type); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type, + match.key->type); + MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code, + match.mask->code); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code, + match.key->code); + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Code and type matching only with ICMP and ICMPv6"); + netdev_err(priv->netdev, + "Code and type matching only with ICMP and ICMPv6\n"); + return -EINVAL; + } + if (match.mask->code || match.mask->type) { + *match_level = MLX5_MATCH_L4; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; + } + } /* Currenlty supported only for MPLS over UDP */ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && !netif_is_bareudp(filter_dev)) { @@ -3014,7 +3108,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, actions = flow->attr->action; if (mlx5e_is_eswitch_flow(flow)) { - if (flow->attr->esw_attr->split_count && ct_flow) { + if (flow->attr->esw_attr->split_count && ct_flow && + !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) { /* All registers used by ct are cleared when using * split rules. 
*/ @@ -3052,6 +3147,13 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) return (fsystem_guid == psystem_guid); } +static bool same_vf_reps(struct mlx5e_priv *priv, + struct net_device *out_dev) +{ + return mlx5e_eswitch_vf_rep(priv->netdev) && + priv->netdev == out_dev; +} + static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, const struct flow_action_entry *act, struct mlx5e_tc_flow_parse_attr *parse_attr, @@ -3561,6 +3663,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_sample_attr sample = {}; bool encap = false, decap = false; u32 action = attr->action; int err, i, if_count = 0; @@ -3737,6 +3840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + if (same_vf_reps(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "can't forward from a VF to itself"); + return -EOPNOTSUPP; + } + out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; @@ -3809,11 +3918,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: + if (flow_flag_test(flow, SAMPLE)) { + NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); + return -EOPNOTSUPP; + } err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); if (err) return err; flow_flag_set(flow, CT); + esw_attr->split_count = esw_attr->out_count; + break; + case FLOW_ACTION_SAMPLE: + if (flow_flag_test(flow, CT)) { + NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); + return -EOPNOTSUPP; + } + sample.rate = act->sample.rate; + sample.group_num = act->sample.psample_group->group_num; + if (act->sample.truncate) + sample.trunc_size = act->sample.trunc_size; + flow_flag_set(flow, SAMPLE); break; default: NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); @@ -3876,11 +4001,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - NL_SET_ERR_MSG_MOD(extack, - "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } @@ -3898,6 +4018,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + /* Allocate sample attribute only when there is a sample action and + * no errors after parsing. 
+ */ + if (flow_flag_test(flow, SAMPLE)) { + esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL); + if (!esw_attr->sample) + return -ENOMEM; + *esw_attr->sample = sample; + } + return 0; } @@ -4300,6 +4430,11 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; int err = 0; + if (!mlx5_esw_hold(priv->mdev)) + return -EAGAIN; + + mlx5_esw_get(priv->mdev); + rcu_read_lock(); flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); if (flow) { @@ -4337,11 +4472,14 @@ rcu_unlock: if (err) goto err_free; + mlx5_esw_release(priv->mdev); return 0; err_free: mlx5e_flow_put(priv, flow); out: + mlx5_esw_put(priv->mdev); + mlx5_esw_release(priv->mdev); return err; } @@ -4381,6 +4519,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, trace_mlx5e_delete_flower(f); mlx5e_flow_put(priv, flow); + mlx5_esw_put(priv->mdev); return 0; errout: @@ -4516,6 +4655,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_POLICE: + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } err = apply_police_params(priv, act->police.rate_bytes_ps, extack); if (err) return err; @@ -4650,6 +4793,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; struct mlx5_core_dev *dev = priv->mdev; + struct mapping_ctx *chains_mapping; struct mlx5_chains_attr attr = {}; int err; @@ -4664,15 +4808,22 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); - if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { + chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj), + MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + if (IS_ERR(chains_mapping)) { + err = PTR_ERR(chains_mapping); + goto err_mapping; + } + tc->mapping = chains_mapping; + + if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED | MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; - attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK; - } attr.ns = MLX5_FLOW_NAMESPACE_KERNEL; attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev); attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS; - attr.default_ft = priv->fs.vlan.ft.t; + attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan); + attr.mapping = chains_mapping; tc->chains = mlx5_chains_create(dev, &attr); if (IS_ERR(tc->chains)) { @@ -4682,10 +4833,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, MLX5_FLOW_NAMESPACE_KERNEL); - if (IS_ERR(tc->ct)) { - err = PTR_ERR(tc->ct); - goto err_ct; - } tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; err = register_netdevice_notifier_dev_net(priv->netdev, @@ -4701,9 +4848,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) err_reg: mlx5_tc_ct_clean(tc->ct); -err_ct: mlx5_chains_destroy(tc->chains); err_chains: + mapping_destroy(chains_mapping); +err_mapping: rhashtable_destroy(&tc->ht); return err; } @@ -4738,6 +4886,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) mutex_destroy(&tc->t_lock); mlx5_tc_ct_clean(tc->ct); + mapping_destroy(tc->mapping); mlx5_chains_destroy(tc->chains); } @@ -4760,8 +4909,10 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) esw_chains(esw), &esw->offloads.mod_hdr, MLX5_FLOW_NAMESPACE_FDB); - if (IS_ERR(uplink_priv->ct_priv)) - goto err_ct; + +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + 
uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev)); +#endif mapping = mapping_create(sizeof(struct tunnel_match_key), TUNNEL_INFO_BITS_MASK, true); @@ -4800,8 +4951,10 @@ err_ht_init: err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + mlx5_esw_sample_cleanup(uplink_priv->esw_psample); +#endif mlx5_tc_ct_clean(uplink_priv->ct_priv); -err_ct: netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); return err; @@ -4819,6 +4972,9 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); mapping_destroy(uplink_priv->tunnel_mapping); +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + mlx5_esw_sample_cleanup(uplink_priv->esw_psample); +#endif mlx5_tc_ct_clean(uplink_priv->ct_priv); } @@ -4874,9 +5030,17 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { - unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD); + unsigned long flags = MLX5_TC_FLAG(INGRESS); struct mlx5e_priv *priv = cb_priv; + if (!priv->netdev || !netif_device_present(priv->netdev)) + return -EOPNOTSUPP; + + if (mlx5e_is_uplink_rep(priv)) + flags |= MLX5_TC_FLAG(ESW_OFFLOAD); + else + flags |= MLX5_TC_FLAG(NIC_OFFLOAD); + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_setup_tc_cls_flower(priv, type_data, flags); @@ -4892,6 +5056,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, u32 chain = 0, chain_tag, reg_b, zone_restore_id; struct mlx5e_priv *priv = netdev_priv(skb->dev); struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5_mapped_obj mapped_obj; struct tc_skb_ext *tc_skb_ext; int err; @@ -4899,7 +5064,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; - err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain); + err = mapping_find(tc->mapping, chain_tag, &mapped_obj); if (err) { netdev_dbg(priv->netdev, "Couldn't find chain for chain tag: %d, err: %d\n", @@ -4907,7 +5072,8 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, return false; } - if (chain) { + if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) { + chain = mapped_obj.chain; tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); if (WARN_ON(!tc_skb_ext)) return false; @@ -4920,6 +5086,9 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, if (!mlx5e_tc_ct_restore_flow(tc->ct, skb, zone_restore_id)) return false; + } else { + netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); + return false; } #endif /* CONFIG_NET_TC_SKB_EXT */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index bdbffe484fce..8ba62671f5f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -133,6 +133,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */ num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch); if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) { + struct mlx5e_ptp *ptp_channel; + /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. 
*/ u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id); @@ -142,10 +144,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, return txq_ix; } - if (unlikely(priv->channels.port_ptp)) - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - mlx5e_use_ptpsq(skb)) - return mlx5e_select_ptpsq(dev, skb); + ptp_channel = READ_ONCE(priv->channels.ptp); + if (unlikely(ptp_channel) && + test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) && + mlx5e_use_ptpsq(skb)) + return mlx5e_select_ptpsq(dev, skb); txq_ix = netdev_pick_tx(dev, skb, NULL); /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs. @@ -576,7 +579,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq, pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS); wqe = MLX5E_TX_FETCH_WQE(sq, pi); - prefetchw(wqe->data); + net_prefetchw(wqe->data); *session = (struct mlx5e_tx_mpwqe) { .wqe = wqe, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index d54da3797c30..833be29170a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -36,6 +36,7 @@ #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/xsk/tx.h" +#include "en_accel/ktls_txrx.h" static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) { @@ -171,6 +172,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) */ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state); + /* Keep after async ICOSQ CQ poll */ + if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget))) + busy |= mlx5e_ktls_rx_handle_resync_list(c, budget); + busy |= INDIRECT_CALL_2(rq->post_wqes, mlx5e_post_rx_mpwqes, mlx5e_post_rx_wqes, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1fa9c18563da..77c0ca655975 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -271,7 +271,7 @@ static void init_eq_buf(struct mlx5_eq *eq) struct mlx5_eqe *eqe; int i; - for (i = 0; i < eq->nent; i++) { + for (i = 0; i < eq_get_size(eq); i++) { eqe = get_eqe(eq, i); eqe->owner = MLX5_EQE_OWNER_INIT_VAL; } @@ -281,8 +281,10 @@ static int create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct mlx5_eq_param *param) { + u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE); struct mlx5_cq_table *cq_table = &eq->cq_table; u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; + u8 log_eq_stride = ilog2(MLX5_EQE_SIZE); struct mlx5_priv *priv = &dev->priv; u8 vecidx = param->irq_index; __be64 *pas; @@ -297,16 +299,18 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, spin_lock_init(&cq_table->lock); INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); - eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; - err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); + + err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride), + &eq->frag_buf, dev->priv.numa_node); if (err) return err; + mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc); init_eq_buf(eq); inlen = MLX5_ST_SZ_BYTES(create_eq_in) + - MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; + MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { @@ -315,7 +319,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, } pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); - 
mlx5_fill_page_array(&eq->buf, pas); + mlx5_fill_page_frag_array(&eq->frag_buf, pas); MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx)) @@ -326,11 +330,11 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, param->mask[i]); eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); - MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); + MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz); MLX5_SET(eqc, eqc, uar_page, priv->uar->index); MLX5_SET(eqc, eqc, intr, vecidx); MLX5_SET(eqc, eqc, log_page_size, - eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) @@ -356,7 +360,7 @@ err_in: kvfree(in); err_buf: - mlx5_buf_free(dev, &eq->buf); + mlx5_frag_buf_free(dev, &eq->frag_buf); return err; } @@ -413,7 +417,7 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) eq->eqn); synchronize_irq(eq->irqn); - mlx5_buf_free(dev, &eq->buf); + mlx5_frag_buf_free(dev, &eq->frag_buf); return err; } @@ -764,10 +768,11 @@ EXPORT_SYMBOL(mlx5_eq_destroy_generic); struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc) { u32 ci = eq->cons_index + cc; + u32 nent = eq_get_size(eq); struct mlx5_eqe *eqe; - eqe = get_eqe(eq, ci & (eq->nent - 1)); - eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe; + eqe = get_eqe(eq, ci & (nent - 1)); + eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe; /* Make sure we read EQ entry contents after we've * checked the ownership bit. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index 3e19b1721303..0399a396d166 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, } if (!vport->egress.acl) { - vport->egress.acl = esw_acl_table_create(esw, vport->vport, + vport->egress.acl = esw_acl_table_create(esw, vport, MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); if (IS_ERR(vport->egress.acl)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c index 26b37a0f8762..505bf811984a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport) esw_acl_egress_vlan_grp_destroy(vport); } -static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num) +static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num) { return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num); } @@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport table_size++; if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) table_size++; - vport->egress.acl = esw_acl_table_create(esw, vport->vport, + vport->egress.acl = esw_acl_table_create(esw, vport, MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); if (IS_ERR(vport->egress.acl)) { err = PTR_ERR(vport->egress.acl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c index 4a369669e51e..45b839116212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c @@ -6,14 +6,14 @@ #include "helper.h" struct mlx5_flow_table * -esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) +esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *acl; int acl_supported; - int vport_index; + u16 vport_num; int err; acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ? @@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) if (!acl_supported) return ERR_PTR(-EOPNOTSUPP); + vport_num = vport->vport; esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num, ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress"); - vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num); - root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index); if (!root_ns) { esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n", vport_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h index 8dc4cab66a71..a47063fab57e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h @@ -8,7 +8,7 @@ /* General acl helper functions */ struct mlx5_flow_table * -esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size); +esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size); /* Egress acl helper functions */ void esw_acl_egress_table_destroy(struct mlx5_vport *vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index d64fad2823e7..f75b86abaf1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, } if (!vport->ingress.acl) { - vport->ingress.acl = esw_acl_table_create(esw, vport->vport, + vport->ingress.acl = esw_acl_table_create(esw, vport, MLX5_FLOW_NAMESPACE_ESW_INGRESS, table_size); if (IS_ERR(vport->ingress.acl)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c index 548c005ea633..39e948bc1204 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -7,7 +7,7 @@ #include "ofld.h" static bool -esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, +esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) { return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && @@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, if (esw_acl_ingress_prio_tag_enabled(esw, vport)) num_ftes++; - vport->ingress.acl = esw_acl_table_create(esw, vport->vport, + vport->ingress.acl = esw_acl_table_create(esw, vport, MLX5_FLOW_NAMESPACE_ESW_INGRESS, num_ftes); if (IS_ERR(vport->ingress.acl)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index cb1e181f4c6a..1703384eca95 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i memcpy(ppid->id, &parent_id, sizeof(parent_id)); } -static bool -mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num) +static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num) { return vport_num == MLX5_VPORT_UPLINK || (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) || @@ -120,11 +119,11 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); - return vport->dl_port; + return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port; } int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 sfnum) + u16 vport_num, u32 controller, u32 sfnum) { struct mlx5_core_dev *dev = esw->dev; struct netdev_phys_item_id ppid = {}; @@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p mlx5_esw_get_port_parent_id(dev, &ppid); memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; - devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum); + devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller); devlink = priv_to_devlink(dev); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); err = devlink_port_register(devlink, dl_port, dl_port_index); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h index cb9eafd1b4ee..21d56b49d14b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h @@ -30,13 +30,13 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr); #else /* indir API stubs */ -struct mlx5_esw_indir_table * +static inline struct mlx5_esw_indir_table * mlx5_esw_indir_table_init(void) { return NULL; } -void +static inline void mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir) { } @@ -57,7 +57,7 @@ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, { } -bool +static inline bool mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, u16 vport_num, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c new file mode 100644 index 000000000000..d9041b16611d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -0,0 +1,510 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021 Mellanox Technologies Ltd */ + +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "esw/acl/lgcy.h" +#include "esw/legacy.h" +#include "mlx5_core.h" +#include "eswitch.h" +#include "fs_core.h" + +enum { + LEGACY_VEPA_PRIO = 0, + LEGACY_FDB_PRIO, +}; + +static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + int err; + + root_ns = mlx5_get_fdb_sub_ns(dev, 0); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -EOPNOTSUPP; + } + + /* num FTE 
2, num FG 2 */ + ft_attr.prio = LEGACY_VEPA_PRIO; + ft_attr.max_fte = 2; + ft_attr.autogroup.max_num_groups = 2; + fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); + return err; + } + esw->fdb_table.legacy.vepa_fdb = fdb; + + return 0; +} + +static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) +{ + esw_debug(esw->dev, "Destroy FDB Table\n"); + if (!esw->fdb_table.legacy.fdb) + return; + + if (esw->fdb_table.legacy.promisc_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); + if (esw->fdb_table.legacy.allmulti_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + if (esw->fdb_table.legacy.addr_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + + esw->fdb_table.legacy.fdb = NULL; + esw->fdb_table.legacy.addr_grp = NULL; + esw->fdb_table.legacy.allmulti_grp = NULL; + esw->fdb_table.legacy.promisc_grp = NULL; + atomic64_set(&esw->user_count, 0); +} + +static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + struct mlx5_flow_group *g; + void *match_criteria; + int table_size; + u32 *flow_group_in; + u8 *dmac; + int err = 0; + + esw_debug(dev, "Create FDB log_max_size(%d)\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + + root_ns = mlx5_get_fdb_sub_ns(dev, 0); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -EOPNOTSUPP; + } + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + ft_attr.max_fte = table_size; + ft_attr.prio = LEGACY_FDB_PRIO; + fdb = mlx5_create_flow_table(root_ns, &ft_attr); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create FDB Table err %d\n", err); + goto out; + } + esw->fdb_table.legacy.fdb = fdb; + + /* Addresses group : Full match unicast/multicast addresses */ + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + /* Preserve 2 entries for allmulti and promisc rules*/ + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); + eth_broadcast_addr(dmac); + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.legacy.addr_grp = g; + + /* Allmulti group : One rule that forwards any mcast traffic */ + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); + eth_zero_addr(dmac); + dmac[0] = 0x01; + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.legacy.allmulti_grp = g; + + /* Promiscuous 
group : + * One rule that forward all unmatched traffic from previous groups + */ + eth_zero_addr(dmac); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.legacy.promisc_grp = g; + +out: + if (err) + esw_destroy_legacy_fdb_table(esw); + + kvfree(flow_group_in); + return err; +} + +static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) +{ + esw_debug(esw->dev, "Destroy VEPA Table\n"); + if (!esw->fdb_table.legacy.vepa_fdb) + return; + + mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb); + esw->fdb_table.legacy.vepa_fdb = NULL; +} + +static int esw_create_legacy_table(struct mlx5_eswitch *esw) +{ + int err; + + memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); + atomic64_set(&esw->user_count, 0); + + err = esw_create_legacy_vepa_table(esw); + if (err) + return err; + + err = esw_create_legacy_fdb_table(esw); + if (err) + esw_destroy_legacy_vepa_table(esw); + + return err; +} + +static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) +{ + if (esw->fdb_table.legacy.vepa_uplink_rule) + mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule); + + if (esw->fdb_table.legacy.vepa_star_rule) + mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule); + + esw->fdb_table.legacy.vepa_uplink_rule = NULL; + esw->fdb_table.legacy.vepa_star_rule = NULL; +} + +static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) +{ + esw_cleanup_vepa_rules(esw); + esw_destroy_legacy_fdb_table(esw); + esw_destroy_legacy_vepa_table(esw); +} + +#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) + +int esw_legacy_enable(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + int ret; + + ret = esw_create_legacy_table(esw); + if (ret) + return ret; + + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + + ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); + if (ret) + esw_destroy_legacy_table(esw); + return ret; +} + +void esw_legacy_disable(struct mlx5_eswitch *esw) +{ + struct esw_mc_addr *mc_promisc; + + mlx5_eswitch_disable_pf_vf_vports(esw); + + mc_promisc = &esw->mc_promisc; + if (mc_promisc->uplink_rule) + mlx5_del_flow_rules(mc_promisc->uplink_rule); + + esw_destroy_legacy_table(esw); +} + +static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, + u8 setting) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_spec *spec; + int err = 0; + void *misc; + + if (!setting) { + esw_cleanup_vepa_rules(esw); + return 0; + } + + if (esw->fdb_table.legacy.vepa_uplink_rule) + return 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + /* Uplink rule forward uplink traffic to FDB */ + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); + + misc = MLX5_ADDR_OF(fte_match_param, 
spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = esw->fdb_table.legacy.fdb; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto out; + } else { + esw->fdb_table.legacy.vepa_uplink_rule = flow_rule; + } + + /* Star rule to forward all traffic to uplink vport */ + memset(&dest, 0, sizeof(dest)); + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport.num = MLX5_VPORT_UPLINK; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto out; + } else { + esw->fdb_table.legacy.vepa_star_rule = flow_rule; + } + +out: + kvfree(spec); + if (err) + esw_cleanup_vepa_rules(esw); + return err; +} + +int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) +{ + int err = 0; + + if (!esw) + return -EOPNOTSUPP; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto out; + } + + err = _mlx5_eswitch_set_vepa_locked(esw, setting); + +out: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) +{ + if (!esw) + return -EOPNOTSUPP; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + if (esw->mode != MLX5_ESWITCH_LEGACY) + return -EOPNOTSUPP; + + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; + return 0; +} + +int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + int ret; + + /* Only non manager vports need ACL in legacy mode */ + if (mlx5_esw_is_manager_vport(esw, vport->vport)) + return 0; + + ret = esw_acl_ingress_lgcy_setup(esw, vport); + if (ret) + goto ingress_err; + + ret = esw_acl_egress_lgcy_setup(esw, vport); + if (ret) + goto egress_err; + + return 0; + +egress_err: + esw_acl_ingress_lgcy_cleanup(esw, vport); +ingress_err: + return ret; +} + +void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + if (mlx5_esw_is_manager_vport(esw, vport->vport)) + return; + + esw_acl_egress_lgcy_cleanup(esw, vport); + esw_acl_ingress_lgcy_cleanup(esw, vport); +} + +int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev, + struct mlx5_vport *vport, + struct mlx5_vport_drop_stats *stats) +{ + u64 rx_discard_vport_down, tx_discard_vport_down; + struct mlx5_eswitch *esw = dev->priv.eswitch; + u64 bytes = 0; + int err = 0; + + if (esw->mode != MLX5_ESWITCH_LEGACY) + return 0; + + mutex_lock(&esw->state_lock); + if (!vport->enabled) + goto unlock; + + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) + mlx5_fc_query(dev, vport->egress.legacy.drop_counter, + &stats->rx_dropped, &bytes); + + if (vport->ingress.legacy.drop_counter) + mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, + &stats->tx_dropped, &bytes); + + if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && + !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + goto unlock; + + err = mlx5_query_vport_down_stats(dev, vport->vport, 1, + &rx_discard_vport_down, + &tx_discard_vport_down); + if (err) + goto unlock; + + if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) + stats->rx_dropped += rx_discard_vport_down; + if 
(MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + stats->tx_dropped += tx_discard_vport_down; + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + u16 vport, u16 vlan, u8 qos) +{ + u8 set_flags = 0; + int err = 0; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + if (vlan || qos) + set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + if (!vlan) + goto unlock; /* compatibility with libvirt */ + + err = -EOPNOTSUPP; + goto unlock; + } + + err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, + u16 vport, bool spoofchk) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + bool pschk; + int err = 0; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + pschk = evport->info.spoofchk; + evport->info.spoofchk = spoofchk; + if (pschk && !is_valid_ether_addr(evport->info.mac)) + mlx5_core_warn(esw->dev, + "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); + if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) + err = esw_acl_ingress_lgcy_setup(esw, evport); + if (err) + evport->info.spoofchk = pschk; + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, + u16 vport, bool setting) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int err = 0; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + evport->info.trusted = setting; + if (evport->enabled) + esw_vport_change_handle_locked(evport); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h new file mode 100644 index 000000000000..e0820bb72b57 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies Ltd */ + +#ifndef __MLX5_ESW_LEGACY_H__ +#define __MLX5_ESW_LEGACY_H__ + +#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) + +struct mlx5_eswitch; + +int esw_legacy_enable(struct mlx5_eswitch *esw); +void esw_legacy_disable(struct mlx5_eswitch *esw); + +int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev, + struct mlx5_vport *vport, + struct mlx5_vport_drop_stats *stats); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c new file mode 100644 index 000000000000..794012c5c476 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021 Mellanox Technologies. 
*/ + +#include <linux/skbuff.h> +#include <net/psample.h> +#include "en/mapping.h" +#include "esw/sample.h" +#include "eswitch.h" +#include "en_tc.h" +#include "fs_core.h" + +#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024) + +static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = { + .max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE, + .max_num_groups = 0, /* default num of groups */ + .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP, +}; + +struct mlx5_esw_psample { + struct mlx5e_priv *priv; + struct mlx5_flow_table *termtbl; + struct mlx5_flow_handle *termtbl_rule; + DECLARE_HASHTABLE(hashtbl, 8); + struct mutex ht_lock; /* protect hashtbl */ + DECLARE_HASHTABLE(restore_hashtbl, 8); + struct mutex restore_lock; /* protect restore_hashtbl */ +}; + +struct mlx5_sampler { + struct hlist_node hlist; + u32 sampler_id; + u32 sample_ratio; + u32 sample_table_id; + u32 default_table_id; + int count; +}; + +struct mlx5_sample_flow { + struct mlx5_sampler *sampler; + struct mlx5_sample_restore *restore; + struct mlx5_flow_attr *pre_attr; + struct mlx5_flow_handle *pre_rule; + struct mlx5_flow_handle *rule; +}; + +struct mlx5_sample_restore { + struct hlist_node hlist; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_flow_handle *rule; + u32 obj_id; + int count; +}; + +static int +sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) +{ + struct mlx5_core_dev *dev = esw_psample->priv->mdev; + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_act act = {}; + int err; + + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) { + mlx5_core_warn(dev, "termination table is not supported\n"); + return -EOPNOTSUPP; + } + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) { + mlx5_core_warn(dev, "failed to get FDB flow namespace\n"); + return -EOPNOTSUPP; + } + + ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.prio = FDB_SLOW_PATH; + ft_attr.max_fte = 1; + ft_attr.level = 1; + esw_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(esw_psample->termtbl)) { + err = PTR_ERR(esw_psample->termtbl); + mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err); + return err; + } + + act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.vport.num = esw->manager_vport; + esw_psample->termtbl_rule = mlx5_add_flow_rules(esw_psample->termtbl, NULL, &act, &dest, 1); + if (IS_ERR(esw_psample->termtbl_rule)) { + err = PTR_ERR(esw_psample->termtbl_rule); + mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err); + mlx5_destroy_flow_table(esw_psample->termtbl); + return err; + } + + return 0; +} + +static void +sampler_termtbl_destroy(struct mlx5_esw_psample *esw_psample) +{ + mlx5_del_flow_rules(esw_psample->termtbl_rule); + mlx5_destroy_flow_table(esw_psample->termtbl); +} + +static int +sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5_sampler *sampler) +{ + u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + u64 general_obj_types; + void *obj; + int err; + + general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types); + if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER)) + return -EOPNOTSUPP; + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)) + return -EOPNOTSUPP; + + obj = 
MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object); + MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB); + MLX5_SET(sampler_obj, obj, ignore_flow_level, 1); + MLX5_SET(sampler_obj, obj, level, 1); + MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio); + MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id); + MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id); + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (!err) + sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return err; +} + +static void +sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id); + + mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static u32 +sampler_hash(u32 sample_ratio, u32 default_table_id) +{ + return jhash_2words(sample_ratio, default_table_id, 0); +} + +static int +sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2) +{ + return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2; +} + +static struct mlx5_sampler * +sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_table_id) +{ + struct mlx5_sampler *sampler; + u32 hash_key; + int err; + + mutex_lock(&esw_psample->ht_lock); + hash_key = sampler_hash(sample_ratio, default_table_id); + hash_for_each_possible(esw_psample->hashtbl, sampler, hlist, hash_key) + if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id, + sample_ratio, default_table_id)) + goto add_ref; + + sampler = kzalloc(sizeof(*sampler), GFP_KERNEL); + if (!sampler) { + err = -ENOMEM; + goto err_alloc; + } + + sampler->sample_table_id = esw_psample->termtbl->id; + sampler->default_table_id = default_table_id; + sampler->sample_ratio = sample_ratio; + + err = sampler_obj_create(esw_psample->priv->mdev, sampler); + if (err) + goto err_create; + + hash_add(esw_psample->hashtbl, &sampler->hlist, hash_key); + +add_ref: + sampler->count++; + mutex_unlock(&esw_psample->ht_lock); + return sampler; + +err_create: + kfree(sampler); +err_alloc: + mutex_unlock(&esw_psample->ht_lock); + return ERR_PTR(err); +} + +static void +sampler_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sampler *sampler) +{ + mutex_lock(&esw_psample->ht_lock); + if (--sampler->count == 0) { + hash_del(&sampler->hlist); + sampler_obj_destroy(esw_psample->priv->mdev, sampler->sampler_id); + kfree(sampler); + } + mutex_unlock(&esw_psample->ht_lock); +} + +static struct mlx5_modify_hdr * +sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) +{ + struct mlx5e_tc_mod_hdr_acts mod_acts = {}; + struct mlx5_modify_hdr *modify_hdr; + int err; + + err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB, + CHAIN_TO_REG, obj_id); + if (err) + goto err_set_regc0; + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB, + mod_acts.num_actions, + mod_acts.actions); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + goto err_modify_hdr; + } + + 
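/*
 * The sampler_get()/sampler_put() pair above, and the restore get/put pair
 * that follows, share one pattern: hash the lookup key, reuse a matching
 * entry under a mutex, otherwise allocate, create the HW object and publish
 * it; put() drops the refcount and tears everything down on zero.  Below is
 * a minimal, generic sketch of that pattern only; the names (obj_cache,
 * obj_get, obj_put) are hypothetical and the HW create/destroy steps are
 * left as comments.
 */
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct obj_cache {
	DECLARE_HASHTABLE(ht, 8);
	struct mutex lock;	/* protects ht and per-entry refcounts */
};

struct cached_obj {
	struct hlist_node hlist;
	u32 key_a;
	u32 key_b;
	int count;
};

static void obj_cache_init(struct obj_cache *c)
{
	hash_init(c->ht);
	mutex_init(&c->lock);
}

static struct cached_obj *obj_get(struct obj_cache *c, u32 key_a, u32 key_b)
{
	u32 hash = jhash_2words(key_a, key_b, 0);
	struct cached_obj *obj;

	mutex_lock(&c->lock);
	hash_for_each_possible(c->ht, obj, hlist, hash)
		if (obj->key_a == key_a && obj->key_b == key_b)
			goto add_ref;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		mutex_unlock(&c->lock);
		return ERR_PTR(-ENOMEM);
	}
	obj->key_a = key_a;
	obj->key_b = key_b;
	/* a real user would create the HW object here before publishing it */
	hash_add(c->ht, &obj->hlist, hash);
add_ref:
	obj->count++;
	mutex_unlock(&c->lock);
	return obj;
}

static void obj_put(struct obj_cache *c, struct cached_obj *obj)
{
	mutex_lock(&c->lock);
	if (--obj->count == 0) {
		hash_del(&obj->hlist);
		/* destroy the HW object here */
		kfree(obj);
	}
	mutex_unlock(&c->lock);
}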
dealloc_mod_hdr_actions(&mod_acts); + return modify_hdr; + +err_modify_hdr: + dealloc_mod_hdr_actions(&mod_acts); +err_set_regc0: + return ERR_PTR(err); +} + +static struct mlx5_sample_restore * +sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) +{ + struct mlx5_core_dev *mdev = esw_psample->priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_sample_restore *restore; + struct mlx5_modify_hdr *modify_hdr; + int err; + + mutex_lock(&esw_psample->restore_lock); + hash_for_each_possible(esw_psample->restore_hashtbl, restore, hlist, obj_id) + if (restore->obj_id == obj_id) + goto add_ref; + + restore = kzalloc(sizeof(*restore), GFP_KERNEL); + if (!restore) { + err = -ENOMEM; + goto err_alloc; + } + restore->obj_id = obj_id; + + modify_hdr = sample_metadata_rule_get(mdev, obj_id); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + goto err_modify_hdr; + } + restore->modify_hdr = modify_hdr; + + restore->rule = esw_add_restore_rule(esw, obj_id); + if (IS_ERR(restore->rule)) { + err = PTR_ERR(restore->rule); + goto err_restore; + } + + hash_add(esw_psample->restore_hashtbl, &restore->hlist, obj_id); +add_ref: + restore->count++; + mutex_unlock(&esw_psample->restore_lock); + return restore; + +err_restore: + mlx5_modify_header_dealloc(mdev, restore->modify_hdr); +err_modify_hdr: + kfree(restore); +err_alloc: + mutex_unlock(&esw_psample->restore_lock); + return ERR_PTR(err); +} + +static void +sample_restore_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sample_restore *restore) +{ + mutex_lock(&esw_psample->restore_lock); + if (--restore->count == 0) + hash_del(&restore->hlist); + mutex_unlock(&esw_psample->restore_lock); + + if (!restore->count) { + mlx5_del_flow_rules(restore->rule); + mlx5_modify_header_dealloc(esw_psample->priv->mdev, restore->modify_hdr); + kfree(restore); + } +} + +void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) +{ + u32 trunc_size = mapped_obj->sample.trunc_size; + struct psample_group psample_group = {}; + struct psample_metadata md = {}; + + md.trunc_size = trunc_size ? 
min(trunc_size, skb->len) : skb->len; + md.in_ifindex = skb->dev->ifindex; + psample_group.group_num = mapped_obj->sample.group_id; + psample_group.net = &init_net; + skb_push(skb, skb->mac_len); + + psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md); +} + +/* For the following typical flow table: + * + * +-------------------------------+ + * + original flow table + + * +-------------------------------+ + * + original match + + * +-------------------------------+ + * + sample action + other actions + + * +-------------------------------+ + * + * We translate the tc filter with sample action to the following HW model: + * + * +---------------------+ + * + original flow table + + * +---------------------+ + * + original match + + * +---------------------+ + * | + * v + * +------------------------------------------------+ + * + Flow Sampler Object + + * +------------------------------------------------+ + * + sample ratio + + * +------------------------------------------------+ + * + sample table id | default table id + + * +------------------------------------------------+ + * | | + * v v + * +-----------------------------+ +----------------------------------------+ + * + sample table + + default table per <vport, chain, prio> + + * +-----------------------------+ +----------------------------------------+ + * + forward to management vport + + original match + + * +-----------------------------+ +----------------------------------------+ + * + other actions + + * +----------------------------------------+ + */ +struct mlx5_flow_handle * +mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_vport_tbl_attr per_vport_tbl_attr; + struct mlx5_esw_flow_attr *pre_esw_attr; + struct mlx5_mapped_obj restore_obj = {}; + struct mlx5_sample_flow *sample_flow; + struct mlx5_sample_attr *sample_attr; + struct mlx5_flow_table *default_tbl; + struct mlx5_flow_attr *pre_attr; + struct mlx5_eswitch *esw; + u32 obj_id; + int err; + + if (IS_ERR_OR_NULL(esw_psample)) + return ERR_PTR(-EOPNOTSUPP); + + /* If slow path flag is set, e.g. when the neigh is invalid for encap, + * don't offload sample action. + */ + esw = esw_psample->priv->mdev->priv.eswitch; + if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) + return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + + sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL); + if (!sample_flow) + return ERR_PTR(-ENOMEM); + esw_attr->sample->sample_flow = sample_flow; + + /* Allocate default table per vport, chain and prio. Otherwise, there is + * only one default table for the same sampler object. Rules with different + * prio and chain may overlap. For CT sample action, per vport default + * table is needed to restore the metadata. + */ + per_vport_tbl_attr.chain = attr->chain; + per_vport_tbl_attr.prio = attr->prio; + per_vport_tbl_attr.vport = esw_attr->in_rep->vport; + per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr); + if (IS_ERR(default_tbl)) { + err = PTR_ERR(default_tbl); + goto err_default_tbl; + } + + /* Perform the original matches on the default table. + * Offload all actions except the sample action. + */ + esw_attr->sample->sample_default_tbl = default_tbl; + /* When offloading sample and encap action, if there is no valid + * neigh data struct, a slow path rule is offloaded first. 
Source + * port metadata match is set at that time. A per vport table is + * already allocated. No need to match it again. So clear the source + * port metadata match. + */ + mlx5_eswitch_clear_rule_source_port(esw, spec); + sample_flow->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + if (IS_ERR(sample_flow->rule)) { + err = PTR_ERR(sample_flow->rule); + goto err_offload_rule; + } + + /* Create sampler object. */ + sample_flow->sampler = sampler_get(esw_psample, esw_attr->sample->rate, default_tbl->id); + if (IS_ERR(sample_flow->sampler)) { + err = PTR_ERR(sample_flow->sampler); + goto err_sampler; + } + + /* Create an id mapping reg_c0 value to sample object. */ + restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE; + restore_obj.sample.group_id = esw_attr->sample->group_num; + restore_obj.sample.rate = esw_attr->sample->rate; + restore_obj.sample.trunc_size = esw_attr->sample->trunc_size; + err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id); + if (err) + goto err_obj_id; + esw_attr->sample->restore_obj_id = obj_id; + + /* Create sample restore context. */ + sample_flow->restore = sample_restore_get(esw_psample, obj_id); + if (IS_ERR(sample_flow->restore)) { + err = PTR_ERR(sample_flow->restore); + goto err_sample_restore; + } + + /* Perform the original matches on the original table. Offload the + * sample action. The destination is the sampler object. + */ + pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); + if (!pre_attr) { + err = -ENOMEM; + goto err_alloc_flow_attr; + } + sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL); + if (!sample_attr) { + err = -ENOMEM; + goto err_alloc_sample_attr; + } + pre_esw_attr = pre_attr->esw_attr; + pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + pre_attr->modify_hdr = sample_flow->restore->modify_hdr; + pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE; + pre_attr->chain = attr->chain; + pre_attr->prio = attr->prio; + pre_esw_attr->sample = sample_attr; + pre_esw_attr->sample->sampler_id = sample_flow->sampler->sampler_id; + pre_esw_attr->in_mdev = esw_attr->in_mdev; + pre_esw_attr->in_rep = esw_attr->in_rep; + sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr); + if (IS_ERR(sample_flow->pre_rule)) { + err = PTR_ERR(sample_flow->pre_rule); + goto err_pre_offload_rule; + } + sample_flow->pre_attr = pre_attr; + + return sample_flow->rule; + +err_pre_offload_rule: + kfree(sample_attr); +err_alloc_sample_attr: + kfree(pre_attr); +err_alloc_flow_attr: + sample_restore_put(esw_psample, sample_flow->restore); +err_sample_restore: + mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id); +err_obj_id: + sampler_put(esw_psample, sample_flow->sampler); +err_sampler: + /* For sample offload, rule is added in default_tbl. No need to call + * mlx5_esw_chains_put_table() + */ + attr->prio = 0; + attr->chain = 0; + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr); +err_offload_rule: + mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr); +err_default_tbl: + return ERR_PTR(err); +} + +void +mlx5_esw_sample_unoffload(struct mlx5_esw_psample *esw_psample, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_sample_flow *sample_flow; + struct mlx5_vport_tbl_attr tbl_attr; + struct mlx5_flow_attr *pre_attr; + struct mlx5_eswitch *esw; + + if (IS_ERR_OR_NULL(esw_psample)) + return; + + /* If slow path flag is set, sample action is not offloaded. + * No need to delete sample rule. 
+ */ + esw = esw_psample->priv->mdev->priv.eswitch; + if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + return; + } + + sample_flow = esw_attr->sample->sample_flow; + pre_attr = sample_flow->pre_attr; + memset(pre_attr, 0, sizeof(*pre_attr)); + esw = esw_psample->priv->mdev->priv.eswitch; + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, pre_attr); + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr); + + sample_restore_put(esw_psample, sample_flow->restore); + mapping_remove(esw->offloads.reg_c0_obj_pool, esw_attr->sample->restore_obj_id); + sampler_put(esw_psample, sample_flow->sampler); + tbl_attr.chain = attr->chain; + tbl_attr.prio = attr->prio; + tbl_attr.vport = esw_attr->in_rep->vport; + tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + mlx5_esw_vporttbl_put(esw, &tbl_attr); + + kfree(pre_attr->esw_attr->sample); + kfree(pre_attr); + kfree(sample_flow); +} + +struct mlx5_esw_psample * +mlx5_esw_sample_init(struct mlx5e_priv *priv) +{ + struct mlx5_esw_psample *esw_psample; + int err; + + esw_psample = kzalloc(sizeof(*esw_psample), GFP_KERNEL); + if (!esw_psample) + return ERR_PTR(-ENOMEM); + esw_psample->priv = priv; + err = sampler_termtbl_create(esw_psample); + if (err) + goto err_termtbl; + + mutex_init(&esw_psample->ht_lock); + mutex_init(&esw_psample->restore_lock); + + return esw_psample; + +err_termtbl: + kfree(esw_psample); + return ERR_PTR(err); +} + +void +mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample) +{ + if (IS_ERR_OR_NULL(esw_psample)) + return; + + mutex_destroy(&esw_psample->restore_lock); + mutex_destroy(&esw_psample->ht_lock); + sampler_termtbl_destroy(esw_psample); + kfree(esw_psample); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h new file mode 100644 index 000000000000..2a3f4be10030 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies. */ + +#ifndef __MLX5_EN_TC_SAMPLE_H__ +#define __MLX5_EN_TC_SAMPLE_H__ + +#include "en.h" +#include "eswitch.h" + +struct mlx5e_priv; +struct mlx5_flow_attr; +struct mlx5_esw_psample; + +struct mlx5_sample_attr { + u32 group_num; + u32 rate; + u32 trunc_size; + u32 restore_obj_id; + u32 sampler_id; + struct mlx5_flow_table *sample_default_tbl; + struct mlx5_sample_flow *sample_flow; +}; + +void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); + +struct mlx5_flow_handle * +mlx5_esw_sample_offload(struct mlx5_esw_psample *sample_priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr); + +void +mlx5_esw_sample_unoffload(struct mlx5_esw_psample *sample_priv, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr); + +struct mlx5_esw_psample * +mlx5_esw_sample_init(struct mlx5e_priv *priv); + +void +mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample); + +#endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c new file mode 100644 index 000000000000..9e72118f2e4c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021 Mellanox Technologies. 
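/*
 * The per-vport table code below hashes a struct key with jhash() and
 * compares entries with memcmp(), which is only safe when the key has no
 * uninitialized padding bytes; marking the key __packed, as this file does,
 * is one way to guarantee that.  A minimal sketch of that keying scheme with
 * hypothetical names (tbl_key, tbl_key_hash, tbl_key_eq):
 */
#include <linux/jhash.h>
#include <linux/string.h>
#include <linux/types.h>

struct tbl_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

static u32 tbl_key_hash(const struct tbl_key *key)
{
	/* safe to hash the raw bytes only because __packed leaves no padding */
	return jhash(key, sizeof(*key), 0);
}

static bool tbl_key_eq(const struct tbl_key *a, const struct tbl_key *b)
{
	return !memcmp(a, b, sizeof(*a));
}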
+ +#include "eswitch.h" + +/* This struct is used as a key to the hash table and we need it to be packed + * so hash result is consistent + */ +struct mlx5_vport_key { + u32 chain; + u16 prio; + u16 vport; + u16 vhca_id; + const struct esw_vport_tbl_namespace *vport_ns; +} __packed; + +struct mlx5_vport_table { + struct hlist_node hlist; + struct mlx5_flow_table *fdb; + u32 num_rules; + struct mlx5_vport_key key; +}; + +static struct mlx5_flow_table * +esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns, + const struct esw_vport_tbl_namespace *vport_ns) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *fdb; + + if (vport_ns->max_num_groups) + ft_attr.autogroup.max_num_groups = vport_ns->max_num_groups; + else + ft_attr.autogroup.max_num_groups = esw->params.large_group_num; + ft_attr.max_fte = vport_ns->max_fte; + ft_attr.prio = FDB_PER_VPORT; + ft_attr.flags = vport_ns->flags; + fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + if (IS_ERR(fdb)) { + esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n", + PTR_ERR(fdb)); + } + + return fdb; +} + +static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw, + struct mlx5_vport_tbl_attr *attr, + struct mlx5_vport_key *key) +{ + key->vport = attr->vport; + key->chain = attr->chain; + key->prio = attr->prio; + key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); + key->vport_ns = attr->vport_ns; + return jhash(key, sizeof(*key), 0); +} + +/* caller must hold vports.lock */ +static struct mlx5_vport_table * +esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key) +{ + struct mlx5_vport_table *e; + + hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key) + if (!memcmp(&e->key, skey, sizeof(*skey))) + return e; + + return NULL; +} + +struct mlx5_flow_table * +mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr) +{ + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *fdb; + struct mlx5_vport_table *e; + struct mlx5_vport_key skey; + u32 hkey; + + mutex_lock(&esw->fdb_table.offloads.vports.lock); + hkey = flow_attr_to_vport_key(esw, attr, &skey); + e = esw_vport_tbl_lookup(esw, &skey, hkey); + if (e) { + e->num_rules++; + goto out; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + fdb = ERR_PTR(-ENOMEM); + goto err_alloc; + } + + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!ns) { + esw_warn(dev, "Failed to get FDB namespace\n"); + fdb = ERR_PTR(-ENOENT); + goto err_ns; + } + + fdb = esw_vport_tbl_create(esw, ns, attr->vport_ns); + if (IS_ERR(fdb)) + goto err_ns; + + e->fdb = fdb; + e->num_rules = 1; + e->key = skey; + hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey); +out: + mutex_unlock(&esw->fdb_table.offloads.vports.lock); + return e->fdb; + +err_ns: + kfree(e); +err_alloc: + mutex_unlock(&esw->fdb_table.offloads.vports.lock); + return fdb; +} + +void +mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr) +{ + struct mlx5_vport_table *e; + struct mlx5_vport_key key; + u32 hkey; + + mutex_lock(&esw->fdb_table.offloads.vports.lock); + hkey = flow_attr_to_vport_key(esw, attr, &key); + e = esw_vport_tbl_lookup(esw, &key, hkey); + if (!e || --e->num_rules) + goto out; + + hash_del(&e->hlist); + mlx5_destroy_flow_table(e->fdb); + kfree(e); +out: + mutex_unlock(&esw->fdb_table.offloads.vports.lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index aba17835465b..570f2280823c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -36,6 +36,7 @@ #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> #include "esw/acl/lgcy.h" +#include "esw/legacy.h" #include "mlx5_core.h" #include "lib/eq.h" #include "eswitch.h" @@ -61,9 +62,6 @@ struct vport_addr { bool mc_promisc; }; -static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); -static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); - static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) { if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) @@ -90,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { - u16 idx; + struct mlx5_vport *vport; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) return ERR_PTR(-EPERM); - idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); - - if (idx > esw->total_vports - 1) { - esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n", - vport_num, idx); + vport = xa_load(&esw->vports, vport_num); + if (!vport) { + esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num); return ERR_PTR(-EINVAL); } - - return &esw->vports[idx]; + return vport; } static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, @@ -278,224 +273,6 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } -enum { - LEGACY_VEPA_PRIO = 0, - LEGACY_FDB_PRIO, -}; - -static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) -{ - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *fdb; - int err; - - root_ns = mlx5_get_fdb_sub_ns(dev, 0); - if (!root_ns) { - esw_warn(dev, "Failed to get FDB flow namespace\n"); - return -EOPNOTSUPP; - } - - /* num FTE 2, num FG 2 */ - ft_attr.prio = LEGACY_VEPA_PRIO; - ft_attr.max_fte = 2; - ft_attr.autogroup.max_num_groups = 2; - fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); - if (IS_ERR(fdb)) { - err = PTR_ERR(fdb); - esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); - return err; - } - esw->fdb_table.legacy.vepa_fdb = fdb; - - return 0; -} - -static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *fdb; - struct mlx5_flow_group *g; - void *match_criteria; - int table_size; - u32 *flow_group_in; - u8 *dmac; - int err = 0; - - esw_debug(dev, "Create FDB log_max_size(%d)\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - - root_ns = mlx5_get_fdb_sub_ns(dev, 0); - if (!root_ns) { - esw_warn(dev, "Failed to get FDB flow namespace\n"); - return -EOPNOTSUPP; - } - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) - return -ENOMEM; - - table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - ft_attr.max_fte = table_size; - ft_attr.prio = LEGACY_FDB_PRIO; - fdb = mlx5_create_flow_table(root_ns, &ft_attr); - if (IS_ERR(fdb)) { - err = PTR_ERR(fdb); - esw_warn(dev, "Failed to create FDB Table err %d\n", err); - goto out; - } - esw->fdb_table.legacy.fdb = fdb; - - /* Addresses group : 
Full match unicast/multicast addresses */ - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_OUTER_HEADERS); - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - /* Preserve 2 entries for allmulti and promisc rules*/ - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); - eth_broadcast_addr(dmac); - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create flow group err(%d)\n", err); - goto out; - } - esw->fdb_table.legacy.addr_grp = g; - - /* Allmulti group : One rule that forwards any mcast traffic */ - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_OUTER_HEADERS); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); - eth_zero_addr(dmac); - dmac[0] = 0x01; - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); - goto out; - } - esw->fdb_table.legacy.allmulti_grp = g; - - /* Promiscuous group : - * One rule that forward all unmatched traffic from previous groups - */ - eth_zero_addr(dmac); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); - goto out; - } - esw->fdb_table.legacy.promisc_grp = g; - -out: - if (err) - esw_destroy_legacy_fdb_table(esw); - - kvfree(flow_group_in); - return err; -} - -static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) -{ - esw_debug(esw->dev, "Destroy VEPA Table\n"); - if (!esw->fdb_table.legacy.vepa_fdb) - return; - - mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb); - esw->fdb_table.legacy.vepa_fdb = NULL; -} - -static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) -{ - esw_debug(esw->dev, "Destroy FDB Table\n"); - if (!esw->fdb_table.legacy.fdb) - return; - - if (esw->fdb_table.legacy.promisc_grp) - mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); - if (esw->fdb_table.legacy.allmulti_grp) - mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); - if (esw->fdb_table.legacy.addr_grp) - mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); - mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); - - esw->fdb_table.legacy.fdb = NULL; - esw->fdb_table.legacy.addr_grp = NULL; - esw->fdb_table.legacy.allmulti_grp = NULL; - esw->fdb_table.legacy.promisc_grp = NULL; -} - -static int esw_create_legacy_table(struct mlx5_eswitch *esw) -{ - int err; - - memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); - - err = esw_create_legacy_vepa_table(esw); - if (err) - return err; - - err = esw_create_legacy_fdb_table(esw); - if (err) - esw_destroy_legacy_vepa_table(esw); - - return err; -} - -static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) -{ - esw_cleanup_vepa_rules(esw); - 
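/*
 * The legacy FDB groups removed above size the full-match address group so
 * that exactly two FTEs remain reserved at the end of the table: one for the
 * allmulti catch-all and one for the promisc (source-port) catch-all.  A
 * minimal sketch of that index arithmetic, with hypothetical names
 * (legacy_fdb_layout, legacy_fdb_layout_init), assuming the same
 * BIT(log_max_ft_size) table sizing:
 */
#include <linux/bits.h>
#include <linux/types.h>

struct legacy_fdb_layout {
	u32 addr_start, addr_end;	/* full-match DMAC entries */
	u32 allmulti_index;		/* single multicast catch-all */
	u32 promisc_index;		/* single source-port catch-all */
};

static void legacy_fdb_layout_init(u8 log_max_ft_size,
				   struct legacy_fdb_layout *l)
{
	u32 table_size = BIT(log_max_ft_size);

	l->addr_start = 0;
	l->addr_end = table_size - 3;	/* keep the last two FTEs free */
	l->allmulti_index = table_size - 2;
	l->promisc_index = table_size - 1;
}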
esw_destroy_legacy_fdb_table(esw); - esw_destroy_legacy_vepa_table(esw); -} - -#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ - MLX5_VPORT_MC_ADDR_CHANGE | \ - MLX5_VPORT_PROMISC_CHANGE) - -static int esw_legacy_enable(struct mlx5_eswitch *esw) -{ - struct mlx5_vport *vport; - int ret, i; - - ret = esw_create_legacy_table(esw); - if (ret) - return ret; - - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) - vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; - - ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); - if (ret) - esw_destroy_legacy_table(esw); - return ret; -} - -static void esw_legacy_disable(struct mlx5_eswitch *esw) -{ - struct esw_mc_addr *mc_promisc; - - mlx5_eswitch_disable_pf_vf_vports(esw); - - mc_promisc = &esw->mc_promisc; - if (mc_promisc->uplink_rule) - mlx5_del_flow_rules(mc_promisc->uplink_rule); - - esw_destroy_legacy_table(esw); -} - /* E-Switch vport UC/MC lists management */ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, struct vport_addr *vaddr); @@ -565,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, { u8 *mac = vaddr->node.addr; struct mlx5_vport *vport; - u16 i, vport_num; + unsigned long i; + u16 vport_num; - mlx5_esw_for_all_vports(esw, i, vport) { + mlx5_esw_for_each_vport(esw, i, vport) { struct hlist_head *vport_hash = vport->mc_list; struct vport_addr *iter_vaddr = l2addr_hash_find(vport_hash, @@ -917,7 +695,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, (promisc_all || promisc_mc)); } -static void esw_vport_change_handle_locked(struct mlx5_vport *vport) +void esw_vport_change_handle_locked(struct mlx5_vport *vport) { struct mlx5_core_dev *dev = vport->dev; struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1141,6 +919,8 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); if (!vport->qos.enabled) return -EOPNOTSUPP; @@ -1166,56 +946,20 @@ static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) ((u8 *)node_guid)[0] = mac[5]; } -static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int ret; - - /* Only non manager vports need ACL in legacy mode */ - if (mlx5_esw_is_manager_vport(esw, vport->vport)) - return 0; - - ret = esw_acl_ingress_lgcy_setup(esw, vport); - if (ret) - goto ingress_err; - - ret = esw_acl_egress_lgcy_setup(esw, vport); - if (ret) - goto egress_err; - - return 0; - -egress_err: - esw_acl_ingress_lgcy_cleanup(esw, vport); -ingress_err: - return ret; -} - static int esw_vport_setup_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (esw->mode == MLX5_ESWITCH_LEGACY) - return esw_vport_create_legacy_acl_tables(esw, vport); + return esw_legacy_vport_acl_setup(esw, vport); else return esw_vport_create_offloads_acl_tables(esw, vport); } -static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) - -{ - if (mlx5_esw_is_manager_vport(esw, vport->vport)) - return; - - esw_acl_egress_lgcy_cleanup(esw, vport); - esw_acl_ingress_lgcy_cleanup(esw, vport); -} - static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (esw->mode == MLX5_ESWITCH_LEGACY) - esw_vport_destroy_legacy_acl_tables(esw, vport); + esw_legacy_vport_acl_cleanup(esw, vport); else esw_vport_destroy_offloads_acl_tables(esw, vport); } @@ -1231,7 +975,7 @@ static int 
esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) return err; /* Attach vport to the eswitch rate limiter */ - esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share); + esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share); if (mlx5_esw_is_manager_vport(esw, vport_num)) return 0; @@ -1279,6 +1023,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, int ret; vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); @@ -1326,6 +1072,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return; mutex_lock(&esw->state_lock); if (!vport->enabled) @@ -1382,15 +1130,9 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; - u16 max_sf_vports; u32 *out; int err; - max_sf_vports = mlx5_sf_max_functions(dev); - /* Device interface is array of 64-bits */ - if (max_sf_vports) - outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64); - out = kvzalloc(outlen, GFP_KERNEL); if (!out) return ERR_PTR(-ENOMEM); @@ -1431,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) { struct mlx5_vport *vport; - int i; + unsigned long i; mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { memset(&vport->qos, 0, sizeof(vport->qos)); @@ -1441,8 +1183,6 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) } /* Public E-Switch API */ -#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) - int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, enum mlx5_eswitch_vport_event enabled_events) { @@ -1471,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) { - int i; + struct mlx5_vport *vport; + unsigned long i; - mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs) - mlx5_eswitch_unload_vport(esw, i); + mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { + if (!vport->enabled) + continue; + mlx5_eswitch_unload_vport(esw, vport->vport); + } } int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, enum mlx5_eswitch_vport_event enabled_events) { + struct mlx5_vport *vport; + unsigned long i; int err; - int i; - mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) { - err = mlx5_eswitch_load_vport(esw, i, enabled_events); + mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { + err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events); if (err) goto vf_err; } @@ -1492,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, return 0; vf_err: - mlx5_eswitch_unload_vf_vports(esw, i - 1); + mlx5_eswitch_unload_vf_vports(esw, num_vfs); return err; } @@ -1625,6 +1370,47 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) blocking_notifier_call_chain(&esw->n_head, 0, &info); } +static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *dev = esw->dev; + int total_vports; + int err; + + total_vports = mlx5_eswitch_get_total_vports(dev); + + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { + err = mlx5_fs_egress_acls_init(dev, 
total_vports); + if (err) + return err; + } else { + esw_warn(dev, "engress ACL is not supported by FW\n"); + } + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { + err = mlx5_fs_ingress_acls_init(dev, total_vports); + if (err) + goto err; + } else { + esw_warn(dev, "ingress ACL is not supported by FW\n"); + } + return 0; + +err: + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + mlx5_fs_egress_acls_cleanup(dev); + return err; +} + +static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *dev = esw->dev; + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + mlx5_fs_ingress_acls_cleanup(dev); + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + mlx5_fs_egress_acls_cleanup(dev); +} + /** * mlx5_eswitch_enable_locked - Enable eswitch * @esw: Pointer to eswitch @@ -1653,14 +1439,12 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) return -EOPNOTSUPP; } - if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) - esw_warn(esw->dev, "ingress ACL is not supported by FW\n"); - - if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) - esw_warn(esw->dev, "engress ACL is not supported by FW\n"); - mlx5_eswitch_get_devlink_param(esw); + err = mlx5_esw_acls_ns_init(esw); + if (err) + return err; + mlx5_eswitch_update_num_of_vfs(esw, num_vfs); esw_create_tsar(esw); @@ -1696,6 +1480,7 @@ abort: mlx5_rescan_drivers(esw->dev); esw_destroy_tsar(esw); + mlx5_esw_acls_ns_cleanup(esw); return err; } @@ -1711,10 +1496,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { int ret; - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return 0; - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); } else { @@ -1726,7 +1511,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (!ret) esw->esw_funcs.num_vfs = num_vfs; } - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return ret; } @@ -1764,6 +1549,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_rescan_drivers(esw->dev); esw_destroy_tsar(esw); + mlx5_esw_acls_ns_cleanup(esw); if (clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); @@ -1771,33 +1557,170 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) { - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return; - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); mlx5_eswitch_disable_locked(esw, clear_vf); esw->esw_funcs.num_vfs = 0; - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); +} + +static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out) +{ + u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {}; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, opmod); + MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF); + MLX5_SET(query_hca_cap_in, in, other_function, true); + return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); +} + +int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id) + +{ + int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *query_ctx; + void *hca_caps; + int err; + + if (!mlx5_core_is_ecpf(dev)) { + *max_sfs = 0; + return 0; + } + + query_ctx = kzalloc(query_out_sz, GFP_KERNEL); + if (!query_ctx) + return -ENOMEM; + + err = 
mlx5_query_hca_cap_host_pf(dev, query_ctx); + if (err) + goto out_free; + + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); + *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf); + *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id); + +out_free: + kfree(query_ctx); + return err; +} + +static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev, + int index, u16 vport_num) +{ + struct mlx5_vport *vport; + int err; + + vport = kzalloc(sizeof(*vport), GFP_KERNEL); + if (!vport) + return -ENOMEM; + + vport->dev = esw->dev; + vport->vport = vport_num; + vport->index = index; + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); + err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL); + if (err) + goto insert_err; + + esw->total_vports++; + return 0; + +insert_err: + kfree(vport); + return err; +} + +static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + xa_erase(&esw->vports, vport->vport); + kfree(vport); +} + +static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + + mlx5_esw_for_each_vport(esw, i, vport) + mlx5_esw_vport_free(esw, vport); + xa_destroy(&esw->vports); +} + +static int mlx5_esw_vports_init(struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *dev = esw->dev; + u16 max_host_pf_sfs; + u16 base_sf_num; + int idx = 0; + int err; + int i; + + xa_init(&esw->vports); + + err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF); + if (err) + goto err; + if (esw->first_host_vport == MLX5_VPORT_PF) + xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); + idx++; + + for (i = 0; i < mlx5_core_max_vfs(dev); i++) { + err = mlx5_esw_vport_alloc(esw, dev, idx, idx); + if (err) + goto err; + xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF); + xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); + idx++; + } + base_sf_num = mlx5_sf_start_function_id(dev); + for (i = 0; i < mlx5_sf_max_functions(dev); i++) { + err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i); + if (err) + goto err; + xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF); + idx++; + } + + err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num); + if (err) + goto err; + for (i = 0; i < max_host_pf_sfs; i++) { + err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i); + if (err) + goto err; + xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF); + idx++; + } + + if (mlx5_ecpf_vport_exists(dev)) { + err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF); + if (err) + goto err; + idx++; + } + err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK); + if (err) + goto err; + return 0; + +err: + mlx5_esw_vports_cleanup(esw); + return err; } int mlx5_eswitch_init(struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - int total_vports; - int err, i; + int err; if (!MLX5_VPORT_MANAGER(dev)) return 0; - total_vports = mlx5_eswitch_get_total_vports(dev); - - esw_info(dev, - "Total vports %d, per vport: max uc(%d) max mc(%d)\n", - total_vports, - MLX5_MAX_UC_PER_VPORT(dev), - MLX5_MAX_MC_PER_VPORT(dev)); - esw = kzalloc(sizeof(*esw), GFP_KERNEL); if (!esw) return -ENOMEM; @@ -1812,18 +1735,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } - esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport), - GFP_KERNEL); - if (!esw->vports) { - err = -ENOMEM; + err = mlx5_esw_vports_init(esw); + if (err) goto abort; - } - - 
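/*
 * The vport-init code above replaces the fixed vports array with an xarray
 * keyed by the (sparse) vport number, using xa marks to tag port classes
 * such as VF, SF and host functions.  Below is a minimal sketch of that
 * lookup scheme; the mark and helper names (DEMO_MARK_VF, demo_vport_add,
 * demo_vport_is_vf) are hypothetical stand-ins.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/xarray.h>

#define DEMO_MARK_VF	XA_MARK_0	/* stands in for a VF port mark */

struct demo_vport {
	u16 vport_num;
};

static int demo_vport_add(struct xarray *xa, u16 vport_num, bool is_vf)
{
	struct demo_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;
	vport->vport_num = vport_num;

	/* xa_insert() fails with -EBUSY if the index is already populated */
	err = xa_insert(xa, vport_num, vport, GFP_KERNEL);
	if (err) {
		kfree(vport);
		return err;
	}
	if (is_vf)
		xa_set_mark(xa, vport_num, DEMO_MARK_VF);
	return 0;
}

static bool demo_vport_is_vf(struct xarray *xa, u16 vport_num)
{
	return xa_load(xa, vport_num) &&
	       xa_get_mark(xa, vport_num, DEMO_MARK_VF);
}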
esw->total_vports = total_vports; err = esw_offloads_init_reps(esw); if (err) - goto abort; + goto reps_err; mutex_init(&esw->offloads.encap_tbl_lock); hash_init(esw->offloads.encap_tbl); @@ -1834,15 +1752,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ida_init(&esw->offloads.vport_metadata_ida); xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); mutex_init(&esw->state_lock); - mutex_init(&esw->mode_lock); - - mlx5_esw_for_all_vports(esw, i, vport) { - vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); - vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; - vport->dev = dev; - INIT_WORK(&vport->vport_change_handler, - esw_vport_change_handler); - } + init_rwsem(&esw->mode_lock); esw->enabled_vports = 0; esw->mode = MLX5_ESWITCH_NONE; @@ -1850,12 +1760,19 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); + + esw_info(dev, + "Total vports %d, per vport: max uc(%d) max mc(%d)\n", + esw->total_vports, + MLX5_MAX_UC_PER_VPORT(dev), + MLX5_MAX_MC_PER_VPORT(dev)); return 0; + +reps_err: + mlx5_esw_vports_cleanup(esw); abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); - esw_offloads_cleanup_reps(esw); - kfree(esw->vports); kfree(esw); return err; } @@ -1869,8 +1786,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); - esw_offloads_cleanup_reps(esw); - mutex_destroy(&esw->mode_lock); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); xa_destroy(&esw->offloads.vhca_map); @@ -1878,7 +1793,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); mutex_destroy(&esw->offloads.encap_tbl_lock); mutex_destroy(&esw->offloads.decap_tbl_lock); - kfree(esw->vports); + esw_offloads_cleanup_reps(esw); + mlx5_esw_vports_cleanup(esw); kfree(esw); } @@ -1937,8 +1853,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, return err; } +static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return false; + + return xa_get_mark(&esw->vports, vport_num, mark); +} + +bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num) +{ + return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF); +} + +bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) +{ + return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF); +} + static bool -is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num) +is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) { return vport_num == MLX5_VPORT_PF || mlx5_eswitch_is_vf_vport(esw, vport_num) || @@ -2023,7 +1960,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int other_vport = 1; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); @@ -2034,6 +1971,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, vport = 0; } mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { @@ -2067,8 +2008,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; - ivi->min_tx_rate = 
evport->info.min_rate; - ivi->max_tx_rate = evport->info.max_rate; + ivi->min_tx_rate = evport->qos.min_rate; + ivi->max_tx_rate = evport->qos.max_rate; mutex_unlock(&esw->state_lock); return 0; @@ -2101,196 +2042,17 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, return err; } -int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, - u16 vport, u16 vlan, u8 qos) -{ - u8 set_flags = 0; - int err; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - - if (vlan || qos) - set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; - - mutex_lock(&esw->state_lock); - err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); - mutex_unlock(&esw->state_lock); - - return err; -} - -int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, - u16 vport, bool spoofchk) -{ - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - bool pschk; - int err = 0; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - if (IS_ERR(evport)) - return PTR_ERR(evport); - - mutex_lock(&esw->state_lock); - pschk = evport->info.spoofchk; - evport->info.spoofchk = spoofchk; - if (pschk && !is_valid_ether_addr(evport->info.mac)) - mlx5_core_warn(esw->dev, - "Spoofchk in set while MAC is invalid, vport(%d)\n", - evport->vport); - if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) - err = esw_acl_ingress_lgcy_setup(esw, evport); - if (err) - evport->info.spoofchk = pschk; - mutex_unlock(&esw->state_lock); - - return err; -} - -static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) -{ - if (esw->fdb_table.legacy.vepa_uplink_rule) - mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule); - - if (esw->fdb_table.legacy.vepa_star_rule) - mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule); - - esw->fdb_table.legacy.vepa_uplink_rule = NULL; - esw->fdb_table.legacy.vepa_star_rule = NULL; -} - -static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, - u8 setting) -{ - struct mlx5_flow_destination dest = {}; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_handle *flow_rule; - struct mlx5_flow_spec *spec; - int err = 0; - void *misc; - - if (!setting) { - esw_cleanup_vepa_rules(esw); - return 0; - } - - if (esw->fdb_table.legacy.vepa_uplink_rule) - return 0; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - /* Uplink rule forward uplink traffic to FDB */ - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); - - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); - - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = esw->fdb_table.legacy.fdb; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, - &flow_act, &dest, 1); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); - goto out; - } else { - esw->fdb_table.legacy.vepa_uplink_rule = flow_rule; - } - - /* Star rule to forward all traffic to uplink vport */ - memset(&dest, 0, sizeof(dest)); - dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport.num = MLX5_VPORT_UPLINK; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL, - &flow_act, &dest, 1); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); - goto out; - } else { - esw->fdb_table.legacy.vepa_star_rule = flow_rule; - } - -out: - 
kvfree(spec); - if (err) - esw_cleanup_vepa_rules(esw); - return err; -} - -int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) -{ - int err = 0; - - if (!esw) - return -EOPNOTSUPP; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - err = -EOPNOTSUPP; - goto out; - } - - err = _mlx5_eswitch_set_vepa_locked(esw, setting); - -out: - mutex_unlock(&esw->state_lock); - return err; -} - -int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) -{ - if (!esw) - return -EOPNOTSUPP; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - - if (esw->mode != MLX5_ESWITCH_LEGACY) - return -EOPNOTSUPP; - - *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; - return 0; -} - -int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, - u16 vport, bool setting) -{ - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - - if (!ESW_ALLOWED(esw)) - return -EPERM; - if (IS_ERR(evport)) - return PTR_ERR(evport); - - mutex_lock(&esw->state_lock); - evport->info.trusted = setting; - if (evport->enabled) - esw_vport_change_handle_locked(evport); - mutex_unlock(&esw->state_lock); - - return 0; -} - static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) { u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); struct mlx5_vport *evport; u32 max_guarantee = 0; - int i; + unsigned long i; - mlx5_esw_for_all_vports(esw, i, evport) { - if (!evport->enabled || evport->info.min_rate < max_guarantee) + mlx5_esw_for_each_vport(esw, i, evport) { + if (!evport->enabled || evport->qos.min_rate < max_guarantee) continue; - max_guarantee = evport->info.min_rate; + max_guarantee = evport->qos.min_rate; } if (max_guarantee) @@ -2305,15 +2067,15 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw) struct mlx5_vport *evport; u32 vport_max_rate; u32 vport_min_rate; + unsigned long i; u32 bw_share; int err; - int i; - mlx5_esw_for_all_vports(esw, i, evport) { + mlx5_esw_for_each_vport(esw, i, evport) { if (!evport->enabled) continue; - vport_min_rate = evport->info.min_rate; - vport_max_rate = evport->info.max_rate; + vport_min_rate = evport->qos.min_rate; + vport_max_rate = evport->qos.max_rate; bw_share = 0; if (divider) @@ -2345,7 +2107,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, bool max_rate_supported; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); @@ -2360,68 +2122,24 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, mutex_lock(&esw->state_lock); - if (min_rate == evport->info.min_rate) + if (min_rate == evport->qos.min_rate) goto set_max_rate; - previous_min_rate = evport->info.min_rate; - evport->info.min_rate = min_rate; + previous_min_rate = evport->qos.min_rate; + evport->qos.min_rate = min_rate; err = normalize_vports_min_rate(esw); if (err) { - evport->info.min_rate = previous_min_rate; + evport->qos.min_rate = previous_min_rate; goto unlock; } set_max_rate: - if (max_rate == evport->info.max_rate) + if (max_rate == evport->qos.max_rate) goto unlock; err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share); if (!err) - evport->info.max_rate = max_rate; - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - -static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, - struct mlx5_vport *vport, - struct mlx5_vport_drop_stats *stats) -{ - struct mlx5_eswitch *esw = dev->priv.eswitch; - u64 rx_discard_vport_down, 
tx_discard_vport_down; - u64 bytes = 0; - int err = 0; - - if (esw->mode != MLX5_ESWITCH_LEGACY) - return 0; - - mutex_lock(&esw->state_lock); - if (!vport->enabled) - goto unlock; - - if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) - mlx5_fc_query(dev, vport->egress.legacy.drop_counter, - &stats->rx_dropped, &bytes); - - if (vport->ingress.legacy.drop_counter) - mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, - &stats->tx_dropped, &bytes); - - if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && - !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) - goto unlock; - - err = mlx5_query_vport_down_stats(dev, vport->vport, 1, - &rx_discard_vport_down, - &tx_discard_vport_down); - if (err) - goto unlock; - - if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) - stats->rx_dropped += rx_discard_vport_down; - if (MLX5_CAP_GEN(dev, transmit_discard_vport_down)) - stats->tx_dropped += tx_discard_vport_down; + evport->qos.max_rate = max_rate; unlock: mutex_unlock(&esw->state_lock); @@ -2495,7 +2213,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); - err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats); if (err) goto free_out; vf_stats->rx_dropped = stats.rx_dropped; @@ -2510,7 +2228,7 @@ u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; - return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE; + return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); @@ -2520,7 +2238,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) struct mlx5_eswitch *esw; esw = dev->priv.eswitch; - return ESW_ALLOWED(esw) ? esw->offloads.encap : + return mlx5_esw_allowed(esw) ? esw->offloads.encap : DEVLINK_ESWITCH_ENCAP_MODE_NONE; } EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); @@ -2552,3 +2270,110 @@ void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifie { blocking_notifier_chain_unregister(&esw->n_head, nb); } + +/** + * mlx5_esw_hold() - Try to take a read lock on esw mode lock. + * @mdev: mlx5 core device. + * + * Should be called by esw resources callers. + * + * Return: true on success or false. + */ +bool mlx5_esw_hold(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + /* e.g. VF doesn't have eswitch so nothing to do */ + if (!mlx5_esw_allowed(esw)) + return true; + + if (down_read_trylock(&esw->mode_lock) != 0) + return true; + + return false; +} + +/** + * mlx5_esw_release() - Release a read lock on esw mode lock. + * @mdev: mlx5 core device. + */ +void mlx5_esw_release(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + if (mlx5_esw_allowed(esw)) + up_read(&esw->mode_lock); +} + +/** + * mlx5_esw_get() - Increase esw user count. + * @mdev: mlx5 core device. + */ +void mlx5_esw_get(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + if (mlx5_esw_allowed(esw)) + atomic64_inc(&esw->user_count); +} + +/** + * mlx5_esw_put() - Decrease esw user count. + * @mdev: mlx5 core device. + */ +void mlx5_esw_put(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + if (mlx5_esw_allowed(esw)) + atomic64_dec_if_positive(&esw->user_count); +} + +/** + * mlx5_esw_try_lock() - Take a write lock on esw mode lock. + * @esw: eswitch device. + * + * Should be called by esw mode change routine. 
+ * + * Return: + * * 0 - esw mode if successfully locked and refcount is 0. + * * -EBUSY - refcount is not 0. + * * -EINVAL - In the middle of switching mode or lock is already held. + */ +int mlx5_esw_try_lock(struct mlx5_eswitch *esw) +{ + if (down_write_trylock(&esw->mode_lock) == 0) + return -EINVAL; + + if (atomic64_read(&esw->user_count) > 0) { + up_write(&esw->mode_lock); + return -EBUSY; + } + + return esw->mode; +} + +/** + * mlx5_esw_unlock() - Release write lock on esw mode lock + * @esw: eswitch device. + */ +void mlx5_esw_unlock(struct mlx5_eswitch *esw) +{ + up_write(&esw->mode_lock); +} + +/** + * mlx5_eswitch_get_total_vports - Get total vports of the eswitch + * + * @dev: Pointer to core device + * + * mlx5_eswitch_get_total_vports returns total number of eswitch vports. + */ +u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) +{ + struct mlx5_eswitch *esw; + + esw = dev->priv.eswitch; + return mlx5_esw_allowed(esw) ? esw->total_vports : 0; +} +EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index fdf5c8c05c1b..64ccb2bc0b58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -46,6 +46,24 @@ #include "lib/fs_chains.h" #include "sf/sf.h" #include "en/tc_ct.h" +#include "esw/sample.h" + +enum mlx5_mapped_obj_type { + MLX5_MAPPED_OBJ_CHAIN, + MLX5_MAPPED_OBJ_SAMPLE, +}; + +struct mlx5_mapped_obj { + enum mlx5_mapped_obj_type type; + union { + u32 chain; + struct { + u32 group_id; + u32 rate; + u32 trunc_size; + } sample; + }; +}; #ifdef CONFIG_MLX5_ESWITCH @@ -118,13 +136,11 @@ struct mlx5_vport_drop_stats { struct mlx5_vport_info { u8 mac[ETH_ALEN]; u16 vlan; - u8 qos; u64 node_guid; int link_state; - u32 min_rate; - u32 max_rate; - bool spoofchk; - bool trusted; + u8 qos; + u8 spoofchk: 1; + u8 trusted: 1; }; /* Vport context events */ @@ -136,7 +152,6 @@ enum mlx5_eswitch_vport_event { struct mlx5_vport { struct mlx5_core_dev *dev; - int vport; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; struct mlx5_flow_handle *promisc_rule; @@ -154,10 +169,14 @@ struct mlx5_vport { bool enabled; u32 esw_tsar_ix; u32 bw_share; + u32 min_rate; + u32 max_rate; } qos; + u16 vport; bool enabled; enum mlx5_eswitch_vport_event enabled_events; + int index; struct devlink_port *dl_port; }; @@ -206,10 +225,11 @@ struct mlx5_esw_offload { struct mlx5_flow_table *ft_offloads_restore; struct mlx5_flow_group *restore_group; struct mlx5_modify_hdr *restore_copy_hdr_id; + struct mapping_ctx *reg_c0_obj_pool; struct mlx5_flow_table *ft_offloads; struct mlx5_flow_group *vport_rx_group; - struct mlx5_eswitch_rep *vport_reps; + struct xarray vport_reps; struct list_head peer_flows; struct mutex peer_mutex; struct mutex encap_tbl_lock; /* protects encap_tbl */ @@ -259,7 +279,7 @@ struct mlx5_eswitch { struct esw_mc_addr mc_promisc; /* end of legacy */ struct workqueue_struct *work_queue; - struct mlx5_vport *vports; + struct xarray vports; u32 flags; int total_vports; int enabled_vports; @@ -271,7 +291,8 @@ struct mlx5_eswitch { /* Protects eswitch mode change that occurs via one or more * user commands, i.e. sriov state change, devlink commands. 
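/* Illustrative sketch (not part of the diff above): the rw_semaphore based
 * mode-lock API added here appears to split callers into two roles, so a
 * plausible usage could look like the following. The function names
 * example_esw_resource_user() and example_esw_change_mode() are made up for
 * illustration; the mlx5_esw_* helpers and their return semantics are the
 * ones defined in the hunk above.
 */
static int example_esw_resource_user(struct mlx5_core_dev *mdev)
{
	int err;

	if (!mlx5_esw_hold(mdev))	/* read-trylock on esw mode_lock */
		return -EBUSY;

	mlx5_esw_get(mdev);		/* pin the current eswitch mode */
	mlx5_esw_release(mdev);		/* drop the read lock early */

	err = 0;			/* ... use eswitch resources ... */

	mlx5_esw_put(mdev);		/* unpin when the work is done */
	return err;
}

/* A mode-change routine would take the write side instead; on success
 * mlx5_esw_try_lock() returns the current mode rather than 0.
 */
static int example_esw_change_mode(struct mlx5_eswitch *esw)
{
	int cur_mode;

	cur_mode = mlx5_esw_try_lock(esw);
	if (cur_mode < 0)	/* -EBUSY: active users, -EINVAL: lock held */
		return cur_mode;

	/* ... switch between MLX5_ESWITCH_LEGACY/OFFLOADS here ... */

	mlx5_esw_unlock(esw);
	return 0;
}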
*/ - struct mutex mode_lock; + struct rw_semaphore mode_lock; + atomic64_t user_count; struct { bool enabled; @@ -294,6 +315,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); +bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw); +int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw); void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); @@ -356,6 +379,9 @@ void mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw, struct mlx5_termtbl_handle *tt); +void +mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec); + struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, @@ -403,6 +429,7 @@ enum { MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1), MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2), MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3), + MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4), }; struct mlx5_esw_flow_attr { @@ -427,6 +454,7 @@ struct mlx5_esw_flow_attr { } dests[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5_rx_tun_attr *rx_tun_attr; struct mlx5_pkt_reformat *decap_pkt_reformat; + struct mlx5_sample_attr *sample; }; int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, @@ -494,6 +522,11 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev); #define esw_debug(dev, format, ...) \ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) +static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw) +{ + return esw && MLX5_ESWITCH_MANAGER(esw->dev); +} + /* The returned number is valid only when the dev is eswitch manager. 
*/ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) { @@ -513,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF; } -static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw) -{ - /* PF and VF vports indices start from 0 to max_vfs */ - return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev); -} - -static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw) -{ - return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev); -} - -static inline int -mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num) -{ - return vport_num - mlx5_sf_start_function_id(esw->dev) + - MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev); -} - -static inline u16 -mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx) -{ - return mlx5_sf_start_function_id(esw->dev) + idx - - (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev)); -} - -static inline bool -mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num) -{ - return mlx5_sf_supported(esw->dev) && - vport_num >= mlx5_sf_start_function_id(esw->dev) && - (vport_num < (mlx5_sf_start_function_id(esw->dev) + - mlx5_sf_max_functions(esw->dev))); -} - static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev) { return mlx5_core_is_ecpf_esw_manager(dev); } -static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw) -{ - /* Uplink always locate at the last element of the array.*/ - return esw->total_vports - 1; -} - -static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw) -{ - return esw->total_vports - 2; -} - -static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw, - u16 vport_num) -{ - if (vport_num == MLX5_VPORT_ECPF) { - if (!mlx5_ecpf_vport_exists(esw->dev)) - esw_warn(esw->dev, "ECPF vport doesn't exist!\n"); - return mlx5_eswitch_ecpf_idx(esw); - } - - if (vport_num == MLX5_VPORT_UPLINK) - return mlx5_eswitch_uplink_idx(esw); - - if (mlx5_esw_is_sf_vport(esw, vport_num)) - return mlx5_esw_sf_vport_num_to_index(esw, vport_num); - - /* PF and VF vports start from 0 to max_vfs */ - return vport_num; -} - -static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, - int index) -{ - if (index == mlx5_eswitch_ecpf_idx(esw) && - mlx5_ecpf_vport_exists(esw->dev)) - return MLX5_VPORT_ECPF; - - if (index == mlx5_eswitch_uplink_idx(esw)) - return MLX5_VPORT_UPLINK; - - /* SF vports indices are after VFs and before ECPF */ - if (mlx5_sf_supported(esw->dev) && - index > mlx5_core_max_vfs(esw->dev)) - return mlx5_esw_sf_vport_index_to_num(esw, index); - - /* PF and VF vports start from 0 to max_vfs */ - return index; -} - static inline unsigned int mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num) @@ -617,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index) /* TODO: This mlx5e_tc function shouldn't be called by eswitch */ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); -/* The vport getter/iterator are only valid after esw->total_vports - * and vport->vport are initialized in mlx5_eswitch_init. +/* Each mark identifies eswitch vport type. + * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using + * a single mark. + * MLX5_ESW_VPT_VF identifies a SRIOV VF vport. + * MLX5_ESW_VPT_SF identifies SF vport. 
*/ -#define mlx5_esw_for_all_vports(esw, i, vport) \ - for ((i) = MLX5_VPORT_PF; \ - (vport) = &(esw)->vports[i], \ - (i) < (esw)->total_vports; (i)++) - -#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \ - for ((i) = (esw)->total_vports - 1; \ - (vport) = &(esw)->vports[i], \ - (i) >= MLX5_VPORT_PF; (i)--) - -#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ - for ((i) = MLX5_VPORT_FIRST_VF; \ - (vport) = &(esw)->vports[(i)], \ - (i) <= (nvfs); (i)++) - -#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \ - for ((i) = (nvfs); \ - (vport) = &(esw)->vports[(i)], \ - (i) >= MLX5_VPORT_FIRST_VF; (i)--) - -/* The rep getter/iterator are only valid after esw->total_vports - * and vport->vport are initialized in mlx5_eswitch_init. +#define MLX5_ESW_VPT_HOST_FN XA_MARK_0 +#define MLX5_ESW_VPT_VF XA_MARK_1 +#define MLX5_ESW_VPT_SF XA_MARK_2 + +/* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init. + * Borrowed the idea from xa_for_each_marked() but with support for desired last element. */ -#define mlx5_esw_for_all_reps(esw, i, rep) \ - for ((i) = MLX5_VPORT_PF; \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) < (esw)->total_vports; (i)++) - -#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ - for ((i) = MLX5_VPORT_FIRST_VF; \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) <= (nvfs); (i)++) - -#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \ - for ((i) = (nvfs); \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) >= MLX5_VPORT_FIRST_VF; (i)--) - -#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \ - for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++) - -#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \ - for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) - -/* Includes host PF (vport 0) if it's not esw manager. 
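/* Illustrative sketch (not part of the diff): how the xarray-backed vport
 * storage and the XA_MARK based iterators defined above are meant to be
 * used. The helper walk_vf_vports() is hypothetical; the macros, marks and
 * the mlx5_vport fields are the ones introduced in this patch.
 */
static void walk_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	/* Walk every vport regardless of type; iteration is keyed by
	 * vport number, not by the old array index.
	 */
	mlx5_esw_for_each_vport(esw, i, vport)
		esw_debug(esw->dev, "vport 0x%x index %d\n",
			  vport->vport, vport->index);

	/* Walk only SRIOV VF vports, up to the last enabled VF. */
	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs)
		esw_debug(esw->dev, "VF vport 0x%x\n", vport->vport);
}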
*/ -#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \ - for ((i) = (esw)->first_host_vport; \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) <= (nvfs); (i)++) - -#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \ - for ((i) = (nvfs); \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) >= (esw)->first_host_vport; (i)--) - -#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \ - for ((vport) = (esw)->first_host_vport; \ - (vport) <= (nvfs); (vport)++) - -#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \ - for ((vport) = (nvfs); \ - (vport) >= (esw)->first_host_vport; (vport)--) - -#define mlx5_esw_for_each_sf_rep(esw, i, rep) \ - for ((i) = mlx5_esw_sf_start_idx(esw); \ - (rep) = &(esw)->offloads.vport_reps[(i)], \ - (i) < mlx5_esw_sf_end_idx(esw); (i++)) + +#define mlx5_esw_for_each_vport(esw, index, vport) \ + xa_for_each(&((esw)->vports), index, vport) + +#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \ + for (index = 0, entry = xa_find(xa, &index, last, filter); \ + entry; entry = xa_find_after(xa, &index, last, filter)) + +#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \ + mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter) + +#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \ + mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF) + +#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \ + mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN) struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink); struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); -bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); +bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num); +bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); @@ -712,13 +622,26 @@ void esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw); -void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw); +struct esw_vport_tbl_namespace { + int max_fte; + int max_num_groups; + u32 flags; +}; + +struct mlx5_vport_tbl_attr { + u16 chain; + u16 prio; + u16 vport; + const struct esw_vport_tbl_namespace *vport_ns; +}; + +struct mlx5_flow_table * +mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr); +void +mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr); struct mlx5_flow_handle * esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag); -u32 -esw_get_max_restore_tag(struct mlx5_eswitch *esw); int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num); void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num); @@ -739,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 sfnum); + u16 vport_num, u32 controller, u32 sfnum); void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 sfnum); + u16 
vport_num, u32 controller, u32 sfnum); void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id); int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num); void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num); @@ -761,6 +685,18 @@ struct mlx5_esw_event_info { int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n); void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n); + +bool mlx5_esw_hold(struct mlx5_core_dev *dev); +void mlx5_esw_release(struct mlx5_core_dev *dev); +void mlx5_esw_get(struct mlx5_core_dev *dev); +void mlx5_esw_put(struct mlx5_core_dev *dev); +int mlx5_esw_try_lock(struct mlx5_eswitch *esw); +void mlx5_esw_unlock(struct mlx5_eswitch *esw); + +void esw_vport_change_handle_locked(struct mlx5_vport *vport); + +bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -781,6 +717,13 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) { return ERR_PTR(-EOPNOTSUPP); } + +static inline unsigned int +mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, + u16 vport_num) +{ + return vport_num; +} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d4a2f8d1ee9f..db1e74280e57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -40,7 +40,6 @@ #include "eswitch.h" #include "esw/indir_table.h" #include "esw/acl/ofld.h" -#include "esw/indir_table.h" #include "rdma.h" #include "en.h" #include "fs_core.h" @@ -48,6 +47,17 @@ #include "lib/eq.h" #include "lib/fs_chains.h" #include "en_tc.h" +#include "en/mapping.h" + +#define mlx5_esw_for_each_rep(esw, i, rep) \ + xa_for_each(&((esw)->offloads.vport_reps), i, rep) + +#define mlx5_esw_for_each_sf_rep(esw, i, rep) \ + xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF) + +#define mlx5_esw_for_each_vf_rep(esw, index, rep) \ + mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \ + rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF) /* There are two match-all miss flows, one for unicast dst mac and * one for multicast. 
@@ -55,192 +65,19 @@ #define MLX5_ESW_MISS_FLOWS (2) #define UPLINK_REP_INDEX 0 -/* Per vport tables */ - -#define MLX5_ESW_VPORT_TABLE_SIZE 128 - -/* This struct is used as a key to the hash table and we need it to be packed - * so hash result is consistent - */ -struct mlx5_vport_key { - u32 chain; - u16 prio; - u16 vport; - u16 vhca_id; -} __packed; - -struct mlx5_vport_tbl_attr { - u16 chain; - u16 prio; - u16 vport; -}; - -struct mlx5_vport_table { - struct hlist_node hlist; - struct mlx5_flow_table *fdb; - u32 num_rules; - struct mlx5_vport_key key; -}; - +#define MLX5_ESW_VPORT_TBL_SIZE 128 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4 -static struct mlx5_flow_table * -esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns) -{ - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_table *fdb; - - ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS; - ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE; - ft_attr.prio = FDB_PER_VPORT; - fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); - if (IS_ERR(fdb)) { - esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n", - PTR_ERR(fdb)); - } - - return fdb; -} - -static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw, - struct mlx5_vport_tbl_attr *attr, - struct mlx5_vport_key *key) -{ - key->vport = attr->vport; - key->chain = attr->chain; - key->prio = attr->prio; - key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); - return jhash(key, sizeof(*key), 0); -} - -/* caller must hold vports.lock */ -static struct mlx5_vport_table * -esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key) -{ - struct mlx5_vport_table *e; - - hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key) - if (!memcmp(&e->key, skey, sizeof(*skey))) - return e; - - return NULL; -} - -static void -esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr) -{ - struct mlx5_vport_table *e; - struct mlx5_vport_key key; - u32 hkey; - - mutex_lock(&esw->fdb_table.offloads.vports.lock); - hkey = flow_attr_to_vport_key(esw, attr, &key); - e = esw_vport_tbl_lookup(esw, &key, hkey); - if (!e || --e->num_rules) - goto out; - - hash_del(&e->hlist); - mlx5_destroy_flow_table(e->fdb); - kfree(e); -out: - mutex_unlock(&esw->fdb_table.offloads.vports.lock); -} - -static struct mlx5_flow_table * -esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr) -{ - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *ns; - struct mlx5_flow_table *fdb; - struct mlx5_vport_table *e; - struct mlx5_vport_key skey; - u32 hkey; - - mutex_lock(&esw->fdb_table.offloads.vports.lock); - hkey = flow_attr_to_vport_key(esw, attr, &skey); - e = esw_vport_tbl_lookup(esw, &skey, hkey); - if (e) { - e->num_rules++; - goto out; - } - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) { - fdb = ERR_PTR(-ENOMEM); - goto err_alloc; - } - - ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); - if (!ns) { - esw_warn(dev, "Failed to get FDB namespace\n"); - fdb = ERR_PTR(-ENOENT); - goto err_ns; - } - - fdb = esw_vport_tbl_create(esw, ns); - if (IS_ERR(fdb)) - goto err_ns; - - e->fdb = fdb; - e->num_rules = 1; - e->key = skey; - hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey); -out: - mutex_unlock(&esw->fdb_table.offloads.vports.lock); - return e->fdb; - -err_ns: - kfree(e); -err_alloc: - mutex_unlock(&esw->fdb_table.offloads.vports.lock); - return fdb; -} - -int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw) -{ - struct mlx5_vport_tbl_attr 
attr; - struct mlx5_flow_table *fdb; - struct mlx5_vport *vport; - int i; - - attr.chain = 0; - attr.prio = 1; - mlx5_esw_for_all_vports(esw, i, vport) { - attr.vport = vport->vport; - fdb = esw_vport_tbl_get(esw, &attr); - if (IS_ERR(fdb)) - goto out; - } - return 0; - -out: - mlx5_esw_vport_tbl_put(esw); - return PTR_ERR(fdb); -} - -void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw) -{ - struct mlx5_vport_tbl_attr attr; - struct mlx5_vport *vport; - int i; - - attr.chain = 0; - attr.prio = 1; - mlx5_esw_for_all_vports(esw, i, vport) { - attr.vport = vport->vport; - esw_vport_tbl_put(esw, &attr); - } -} - -/* End: Per vport tables */ +static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { + .max_fte = MLX5_ESW_VPORT_TBL_SIZE, + .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS, + .flags = 0, +}; static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, u16 vport_num) { - int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); - - WARN_ON(idx > esw->total_vports - 1); - return &esw->offloads.vport_reps[idx]; + return xa_load(&esw->offloads.vport_reps, vport_num); } static void @@ -256,6 +93,26 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; } +/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits + * are not needed as well in the following process. So clear them all for simplicity. + */ +void +mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec) +{ + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + void *misc2; + + misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0); + + misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0); + + if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2))) + spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2; + } +} + static void mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, @@ -327,6 +184,19 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw, } static int +esw_setup_sampler_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_esw_flow_attr *esw_attr, + int i) +{ + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; + dest[i].sampler_id = esw_attr->sample->sampler_id; + + return 0; +} + +static int esw_setup_ft_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, struct mlx5_eswitch *esw, @@ -561,7 +431,10 @@ esw_setup_dests(struct mlx5_flow_destination *dest, esw_src_port_rewrite_supported(esw)) attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; - if (attr->dest_ft) { + if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) { + esw_setup_sampler_dest(dest, flow_act, esw_attr, *i); + (*i)++; + } else if (attr->dest_ft) { esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); (*i)++; } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { @@ -664,12 +537,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_hdr = attr->modify_hdr; - if (split) { + /* esw_attr->sample is allocated only when there is a sample action */ + if (esw_attr->sample && esw_attr->sample->sample_default_tbl) { + fdb = esw_attr->sample->sample_default_tbl; + } else if (split) { fwd_attr.chain = attr->chain; 
fwd_attr.prio = attr->prio; fwd_attr.vport = esw_attr->in_rep->vport; + fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; - fdb = esw_vport_tbl_get(esw, &fwd_attr); + fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr); } else { if (attr->chain || attr->prio) fdb = mlx5_chains_get_table(chains, attr->chain, @@ -701,7 +578,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, err_add_rule: if (split) - esw_vport_tbl_put(esw, &fwd_attr); + mlx5_esw_vporttbl_put(esw, &fwd_attr); else if (attr->chain || attr->prio) mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); err_esw_get: @@ -734,7 +611,8 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, fwd_attr.chain = attr->chain; fwd_attr.prio = attr->prio; fwd_attr.vport = esw_attr->in_rep->vport; - fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr); + fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; + fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr); if (IS_ERR(fwd_fdb)) { rule = ERR_CAST(fwd_fdb); goto err_get_fwd; @@ -779,7 +657,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, return rule; err_chain_src_rewrite: esw_put_dest_tables_loop(esw, attr, 0, i); - esw_vport_tbl_put(esw, &fwd_attr); + mlx5_esw_vporttbl_put(esw, &fwd_attr); err_get_fwd: mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); err_get_fast: @@ -814,15 +692,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, fwd_attr.chain = attr->chain; fwd_attr.prio = attr->prio; fwd_attr.vport = esw_attr->in_rep->vport; + fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; } if (fwd_rule) { - esw_vport_tbl_put(esw, &fwd_attr); + mlx5_esw_vporttbl_put(esw, &fwd_attr); mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count); } else { if (split) - esw_vport_tbl_put(esw, &fwd_attr); + mlx5_esw_vporttbl_put(esw, &fwd_attr); else if (attr->chain || attr->prio) mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); esw_cleanup_dests(esw, attr); @@ -848,10 +727,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) { struct mlx5_eswitch_rep *rep; - int i, err = 0; + unsigned long i; + int err = 0; esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? 
"pop" : "none"); - mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) { + mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) { if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) continue; @@ -1043,7 +923,8 @@ out: } struct mlx5_flow_handle * -mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport, +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch_rep *rep, u32 sqn) { struct mlx5_flow_act flow_act = {0}; @@ -1061,21 +942,30 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); /* source vport is the esw manager */ - MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport); + MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport); + if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(rep->esw->dev, vhca_id)); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport.num = vport; + dest.vport.num = rep->vport; + dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id); + dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) - esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); + esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n", + PTR_ERR(flow_rule)); out: kvfree(spec); return flow_rule; @@ -1090,13 +980,13 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) { struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules; - int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num; + int i = 0, num_vfs = esw->esw_funcs.num_vfs; if (!num_vfs || !flows) return; - mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) - mlx5_del_flow_rules(flows[i++]); + for (i = 0; i < num_vfs; i++) + mlx5_del_flow_rules(flows[i]); kvfree(flows); } @@ -1104,12 +994,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) static int mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) { - int num_vfs, vport_num, rule_idx = 0, err = 0; struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {0}; + int num_vfs, rule_idx = 0, err = 0; struct mlx5_flow_handle *flow_rule; struct mlx5_flow_handle **flows; struct mlx5_flow_spec *spec; + struct mlx5_vport *vport; + unsigned long i; + u16 vport_num; num_vfs = esw->esw_funcs.num_vfs; flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); @@ -1133,7 +1026,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - mlx5_esw_for_each_vf_vport_num(esw, vport_num, 
num_vfs) { + mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { + vport_num = vport->vport; MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); dest.vport.num = vport_num; @@ -1275,12 +1169,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_handle **flows; - struct mlx5_flow_handle *flow; - struct mlx5_flow_spec *spec; /* total vports is the same for both e-switches */ int nvports = esw->total_vports; + struct mlx5_flow_handle *flow; + struct mlx5_flow_spec *spec; + struct mlx5_vport *vport; + unsigned long i; void *misc; - int err, i; + int err; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) @@ -1299,6 +1195,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, misc_parameters); if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, spec, MLX5_VPORT_PF); @@ -1308,10 +1205,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_pf_flow_err; } - flows[MLX5_VPORT_PF] = flow; + flows[vport->index] = flow; } if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); @@ -1319,13 +1217,13 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_ecpf_flow_err; } - flows[mlx5_eswitch_ecpf_idx(esw)] = flow; + flows[vport->index] = flow; } - mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) { + mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, i); + spec, vport->vport); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); @@ -1333,7 +1231,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_vf_flow_err; } - flows[i] = flow; + flows[vport->index] = flow; } esw->fdb_table.offloads.peer_miss_rules = flows; @@ -1342,15 +1240,20 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, return 0; add_vf_flow_err: - nvports = --i; - mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports) - mlx5_del_flow_rules(flows[i]); - - if (mlx5_ecpf_vport_exists(esw->dev)) - mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); + mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { + if (!flows[vport->index]) + continue; + mlx5_del_flow_rules(flows[vport->index]); + } + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[vport->index]); + } add_ecpf_flow_err: - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) - mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[vport->index]); + } add_pf_flow_err: esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); kvfree(flows); @@ -1362,20 +1265,23 @@ alloc_flows_err: static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) { struct mlx5_flow_handle **flows; - int i; + struct mlx5_vport *vport; + unsigned long i; flows = 
esw->fdb_table.offloads.peer_miss_rules; - mlx5_esw_for_each_vf_vport_num_reverse(esw, i, - mlx5_core_max_vfs(esw->dev)) - mlx5_del_flow_rules(flows[i]); + mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) + mlx5_del_flow_rules(flows[vport->index]); - if (mlx5_ecpf_vport_exists(esw->dev)) - mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); - - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) - mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[vport->index]); + } + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[vport->index]); + } kvfree(flows); } @@ -1453,14 +1359,14 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) return ERR_PTR(-EOPNOTSUPP); - spec = kzalloc(sizeof(*spec), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return ERR_PTR(-ENOMEM); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, - ESW_CHAIN_TAG_METADATA_MASK); + ESW_REG_C0_USER_DATA_METADATA_MASK); misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag); @@ -1476,7 +1382,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) dest.ft = esw->offloads.ft_offloads; flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); - kfree(spec); + kvfree(spec); if (IS_ERR(flow_rule)) esw_warn(esw->dev, @@ -1486,12 +1392,6 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) return flow_rule; } -u32 -esw_get_max_restore_tag(struct mlx5_eswitch *esw) -{ - return ESW_CHAIN_TAG_METADATA_MASK; -} - #define MAX_PF_SQ 256 #define MAX_SQ_NVPORTS 32 @@ -1521,6 +1421,44 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, } #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) +static void esw_vport_tbl_put(struct mlx5_eswitch *esw) +{ + struct mlx5_vport_tbl_attr attr; + struct mlx5_vport *vport; + unsigned long i; + + attr.chain = 0; + attr.prio = 1; + mlx5_esw_for_each_vport(esw, i, vport) { + attr.vport = vport->vport; + attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; + mlx5_esw_vporttbl_put(esw, &attr); + } +} + +static int esw_vport_tbl_get(struct mlx5_eswitch *esw) +{ + struct mlx5_vport_tbl_attr attr; + struct mlx5_flow_table *fdb; + struct mlx5_vport *vport; + unsigned long i; + + attr.chain = 0; + attr.prio = 1; + mlx5_esw_for_each_vport(esw, i, vport) { + attr.vport = vport->vport; + attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; + fdb = mlx5_esw_vporttbl_get(esw, &attr); + if (IS_ERR(fdb)) + goto out; + } + return 0; + +out: + esw_vport_tbl_put(esw); + return PTR_ERR(fdb); +} + #define fdb_modify_header_fwd_to_table_supported(esw) \ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table)) static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags) @@ -1570,7 +1508,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) attr.max_ft_sz = fdb_max; attr.max_grp_num = esw->params.large_group_num; attr.default_ft = miss_fdb; - attr.max_restore_tag = esw_get_max_restore_tag(esw); + attr.mapping = esw->offloads.reg_c0_obj_pool; chains = mlx5_chains_create(dev, &attr); if (IS_ERR(chains)) { @@ -1598,7 +1536,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) /* Open level 1 for 
split fdb rules now if prios isn't supported */ if (!mlx5_chains_prios_supported(chains)) { - err = mlx5_esw_vport_tbl_get(esw); + err = esw_vport_tbl_get(esw); if (err) goto level_1_err; } @@ -1622,7 +1560,7 @@ static void esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains) { if (!mlx5_chains_prios_supported(chains)) - mlx5_esw_vport_tbl_put(esw); + esw_vport_tbl_put(esw); mlx5_chains_put_table(chains, 0, 1, 0); mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0); mlx5_chains_destroy(chains); @@ -1709,6 +1647,12 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_eswitch_owner_vhca_id); + MLX5_SET(create_flow_group_in, flow_group_in, + source_eswitch_owner_vhca_id_valid, 1); + } ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); @@ -1865,6 +1809,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) /* Holds true only as long as DMFS is the default */ mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, MLX5_FLOW_STEERING_MODE_DMFS); + atomic64_set(&esw->user_count, 0); } static int esw_create_offloads_table(struct mlx5_eswitch *esw) @@ -1988,12 +1933,12 @@ out: return flow_rule; } - -static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode) +static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) { u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; struct mlx5_core_dev *dev = esw->dev; - int vport; + struct mlx5_vport *vport; + unsigned long i; if (!MLX5_CAP_GEN(dev, vport_group_manager)) return -EOPNOTSUPP; @@ -2014,8 +1959,8 @@ static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode query_vports: mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); - mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { - mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); + mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { + mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode); if (prev_mlx5_mode != mlx5_mode) return -EINVAL; prev_mlx5_mode = mlx5_mode; @@ -2067,7 +2012,7 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) goto out_free; } - ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS; + ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS; ft = mlx5_create_flow_table(ns, &ft_attr); if (IS_ERR(ft)) { err = PTR_ERR(ft); @@ -2082,7 +2027,7 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, - ESW_CHAIN_TAG_METADATA_MASK); + ESW_REG_C0_USER_DATA_METADATA_MASK); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft_attr.max_fte - 1); @@ -2158,34 +2103,82 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, return err; } -void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) +static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + xa_mark_t mark) { - kfree(esw->offloads.vport_reps); + bool mark_set; + + /* Copy the mark from vport to its rep */ + mark_set = 
xa_get_mark(&esw->vports, rep->vport, mark); + if (mark_set) + xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark); } -int esw_offloads_init_reps(struct mlx5_eswitch *esw) +static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) { - int total_vports = esw->total_vports; struct mlx5_eswitch_rep *rep; - int vport_index; - u8 rep_type; + int rep_type; + int err; - esw->offloads.vport_reps = kcalloc(total_vports, - sizeof(struct mlx5_eswitch_rep), - GFP_KERNEL); - if (!esw->offloads.vport_reps) + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) return -ENOMEM; - mlx5_esw_for_all_reps(esw, vport_index, rep) { - rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index); - rep->vport_index = vport_index; + rep->vport = vport->vport; + rep->vport_index = vport->index; + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) + atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); - for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) - atomic_set(&rep->rep_data[rep_type].state, - REP_UNREGISTERED); - } + err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL); + if (err) + goto insert_err; + mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN); + mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF); + mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF); return 0; + +insert_err: + kfree(rep); + return err; +} + +static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + xa_erase(&esw->offloads.vport_reps, rep->vport); + kfree(rep); +} + +void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) +{ + struct mlx5_eswitch_rep *rep; + unsigned long i; + + mlx5_esw_for_each_rep(esw, i, rep) + mlx5_esw_offloads_rep_cleanup(esw, rep); + xa_destroy(&esw->offloads.vport_reps); +} + +int esw_offloads_init_reps(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + int err; + + xa_init(&esw->offloads.vport_reps); + + mlx5_esw_for_each_vport(esw, i, vport) { + err = mlx5_esw_offloads_rep_init(esw, vport); + if (err) + goto err; + } + return 0; + +err: + esw_offloads_cleanup_reps(esw); + return err; } static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, @@ -2199,7 +2192,7 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int i; + unsigned long i; mlx5_esw_for_each_sf_rep(esw, i, rep) __esw_offloads_unload_rep(esw, rep, rep_type); @@ -2208,11 +2201,11 @@ static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int i; + unsigned long i; __unload_reps_sf_vport(esw, rep_type); - mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs) + mlx5_esw_for_each_vf_rep(esw, i, rep) __esw_offloads_unload_rep(esw, rep, rep_type); if (mlx5_ecpf_vport_exists(esw->dev)) { @@ -2270,9 +2263,11 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) if (esw->mode != MLX5_ESWITCH_OFFLOADS) return 0; - err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); - if (err) - return err; + if (vport_num != MLX5_VPORT_UPLINK) { + err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); + if (err) + return err; + } err = mlx5_esw_offloads_rep_load(esw, vport_num); if (err) @@ -2280,7 +2275,8 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) return err; 
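/* Illustrative sketch (not part of the diff): the bare xarray insert/mark
 * pattern that mlx5_esw_offloads_rep_init() above relies on, reduced to
 * plain <linux/xarray.h> calls. demo_xa, demo_add() and demo_walk_vfs()
 * are hypothetical names used only for this example.
 */
#include <linux/xarray.h>
#include <linux/printk.h>

static DEFINE_XARRAY(demo_xa);

static int demo_add(unsigned long id, void *entry, bool is_vf)
{
	int err;

	err = xa_insert(&demo_xa, id, entry, GFP_KERNEL);
	if (err)		/* -EBUSY if the index is already in use */
		return err;

	if (is_vf)		/* marks enable filtered iteration later */
		xa_set_mark(&demo_xa, id, XA_MARK_1);
	return 0;
}

static void demo_walk_vfs(void)
{
	unsigned long index;
	void *entry;

	xa_for_each_marked(&demo_xa, index, entry, XA_MARK_1)
		pr_debug("VF entry at index %lu\n", index);
}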
load_err: - mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); + if (vport_num != MLX5_VPORT_UPLINK) + mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); return err; } @@ -2290,7 +2286,9 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) return; mlx5_esw_offloads_rep_unload(esw, vport_num); - mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); + + if (vport_num != MLX5_VPORT_UPLINK) + mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); } #define ESW_OFFLOADS_DEVCOM_PAIR (0) @@ -2299,13 +2297,8 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, struct mlx5_eswitch *peer_esw) { - int err; - - err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); - if (err) - return err; - return 0; + return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); } static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) @@ -2430,8 +2423,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); } -static bool -esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) +bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) { if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) return false; @@ -2500,25 +2492,25 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) { struct mlx5_vport *vport; - int i; + unsigned long i; if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) return; - mlx5_esw_for_all_vports_reverse(esw, i, vport) + mlx5_esw_for_each_vport(esw, i, vport) esw_offloads_vport_metadata_cleanup(esw, vport); } static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) { struct mlx5_vport *vport; + unsigned long i; int err; - int i; if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) return 0; - mlx5_esw_for_all_vports(esw, i, vport) { + mlx5_esw_for_each_vport(esw, i, vport) { err = esw_offloads_vport_metadata_setup(esw, vport); if (err) goto metadata_err; @@ -2531,6 +2523,28 @@ metadata_err: return err; } +int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable) +{ + int err = 0; + + down_write(&esw->mode_lock); + if (esw->mode != MLX5_ESWITCH_NONE) { + err = -EBUSY; + goto done; + } + if (!mlx5_esw_vport_match_metadata_supported(esw)) { + err = -EOPNOTSUPP; + goto done; + } + if (enable) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + else + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; +done: + up_write(&esw->mode_lock); + return err; +} + int esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, struct mlx5_vport *vport) @@ -2565,6 +2579,9 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + if (IS_ERR(vport)) + return PTR_ERR(vport); + return esw_vport_create_offloads_acl_tables(esw, vport); } @@ -2573,6 +2590,9 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + if (IS_ERR(vport)) + return; + esw_vport_destroy_offloads_acl_tables(esw, vport); } @@ -2584,6 +2604,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.vports.lock); hash_init(esw->fdb_table.offloads.vports.table); + 
atomic64_set(&esw->user_count, 0); indir = mlx5_esw_indir_table_init(); if (IS_ERR(indir)) { @@ -2726,10 +2747,25 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) return 0; } +bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller) +{ + /* Local controller is always valid */ + if (controller == 0) + return true; + + if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) + return false; + + /* External host number starts with zero in device */ + return (controller == esw->offloads.host_number + 1); +} + int esw_offloads_enable(struct mlx5_eswitch *esw) { + struct mapping_ctx *reg_c0_obj_pool; struct mlx5_vport *vport; - int err, i; + unsigned long i; + int err; if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) @@ -2744,9 +2780,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_metadata; - if (esw_check_vport_match_metadata_supported(esw)) - esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; - err = esw_offloads_metadata_init(esw); if (err) goto err_metadata; @@ -2755,6 +2788,15 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; + reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj), + ESW_REG_C0_USER_DATA_METADATA_MASK, + true); + if (IS_ERR(reg_c0_obj_pool)) { + err = PTR_ERR(reg_c0_obj_pool); + goto err_pool; + } + esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool; + err = esw_offloads_steering_init(esw); if (err) goto err_steering_init; @@ -2781,11 +2823,12 @@ err_vports: err_uplink: esw_offloads_steering_cleanup(esw); err_steering_init: + mapping_destroy(reg_c0_obj_pool); +err_pool: esw_set_passing_vport_metadata(esw, false); err_vport_metadata: esw_offloads_metadata_uninit(esw); err_metadata: - esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); return err; @@ -2819,8 +2862,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); esw_set_passing_vport_metadata(esw, false); esw_offloads_steering_cleanup(esw); + mapping_destroy(esw->offloads.reg_c0_obj_pool); esw_offloads_metadata_uninit(esw); - esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; @@ -2925,8 +2968,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; - mutex_lock(&esw->mode_lock); - cur_mlx5_mode = esw->mode; + err = mlx5_esw_try_lock(esw); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); + return err; + } + cur_mlx5_mode = err; + err = 0; + if (cur_mlx5_mode == mlx5_mode) goto unlock; @@ -2938,7 +2987,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = -EINVAL; unlock: - mutex_unlock(&esw->mode_lock); + mlx5_esw_unlock(esw); return err; } @@ -2951,14 +3000,45 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; err = esw_mode_to_devlink(esw->mode, mode); unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); + return err; +} + +static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = 
esw->dev; + struct mlx5_vport *vport; + u16 err_vport_num = 0; + unsigned long i; + int err = 0; + + mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { + err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); + if (err) { + err_vport_num = vport->vport; + NL_SET_ERR_MSG_MOD(extack, + "Failed to set min inline on vport"); + goto revert_inline_mode; + } + } + return 0; + +revert_inline_mode: + mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { + if (vport->vport == err_vport_num) + break; + mlx5_modify_nic_vport_min_inline(dev, + vport->vport, + esw->offloads.inline_mode); + } return err; } @@ -2966,15 +3046,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); - int err, vport, num_vport; struct mlx5_eswitch *esw; u8 mlx5_mode; + int err; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto out; @@ -3003,27 +3083,16 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, if (err) goto out; - mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { - err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); - if (err) { - NL_SET_ERR_MSG_MOD(extack, - "Failed to set min inline on vport"); - goto revert_inline_mode; - } - } + err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); + if (err) + goto out; esw->offloads.inline_mode = mlx5_mode; - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return 0; -revert_inline_mode: - num_vport = --vport; - mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport) - mlx5_modify_nic_vport_min_inline(dev, - vport, - esw->offloads.inline_mode); out: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -3036,14 +3105,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -3059,7 +3128,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; @@ -3105,7 +3174,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, } unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -3120,14 +3189,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; *encap = esw->offloads.encap; unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return 0; } @@ -3152,11 +3221,12 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, { struct mlx5_eswitch_rep_data *rep_data; struct mlx5_eswitch_rep *rep; - int i; + unsigned long i; esw->offloads.rep_ops[rep_type] = ops; - mlx5_esw_for_all_reps(esw, i, rep) { - if (likely(mlx5_eswitch_vport_has_rep(esw, i))) { + mlx5_esw_for_each_rep(esw, i, rep) { + if 
(likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) { + rep->esw = esw; rep_data = &rep->rep_data[rep_type]; atomic_set(&rep_data->state, REP_REGISTERED); } @@ -3167,12 +3237,12 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int i; + unsigned long i; if (esw->mode == MLX5_ESWITCH_OFFLOADS) __unload_reps_all_vport(esw, rep_type); - mlx5_esw_for_all_reps(esw, i, rep) + mlx5_esw_for_each_rep(esw, i, rep) atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); } EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); @@ -3213,12 +3283,6 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, } EXPORT_SYMBOL(mlx5_eswitch_vport_rep); -bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num) -{ - return vport_num >= MLX5_VPORT_FIRST_VF && - vport_num <= esw->dev->priv.sriov.max_vfs; -} - bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) { return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); @@ -3244,7 +3308,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 sfnum) + u16 vport_num, u32 controller, u32 sfnum) { int err; @@ -3252,7 +3316,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p if (err) return err; - err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum); + err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum); if (err) goto devlink_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index ec679560a95d..a81ece94f599 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev, ft_attr.autogroup.max_num_groups = 1; tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); if (IS_ERR(tt->termtbl)) { - esw_warn(dev, "Failed to create termination table\n"); + esw_warn(dev, "Failed to create termination table (error %d)\n", + IS_ERR(tt->termtbl)); return -EOPNOTSUPP; } tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act, &tt->dest, 1); if (IS_ERR(tt->rule)) { - esw_warn(dev, "Failed to create termination table rule\n"); + esw_warn(dev, "Failed to create termination table rule (error %d)\n", + IS_ERR(tt->rule)); goto add_flow_err; } return 0; @@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw, memcpy(&tt->flow_act, flow_act, sizeof(*flow_act)); err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act); - if (err) { - esw_warn(esw->dev, "Failed to create termination table\n"); + if (err) goto tt_create_err; - } + hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key); tt_add_ref: tt->ref_count++; @@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw, tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act, &dest[i], attr); if (IS_ERR(tt)) { - esw_warn(esw->dev, "Failed to create termination table\n"); + esw_warn(esw->dev, "Failed to get termination table (error %d)\n", + IS_ERR(tt)); goto revert_changes; } attr->dests[num_vport_dests].termtbl = tt; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 22bee4990232..0bba92cf5dc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -707,7 +707,7 @@ static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, } if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) { - err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL); + err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL); if (err < 0) { context = ERR_PTR(err); goto exists; @@ -758,7 +758,7 @@ delete_hash: unlock_hash: mutex_unlock(&fipsec->sa_hash_lock); if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) - ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle); + ida_free(&fipsec->halloc, sa_ctx->sa_handle); exists: mutex_unlock(&fpga_xfrm->lock); kfree(sa_ctx); @@ -850,9 +850,9 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) return; } - if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action & + if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) - ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle); + ida_free(&fipsec->halloc, sa_ctx->sa_handle); mutex_lock(&fipsec->sa_hash_lock); WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, @@ -1085,6 +1085,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns, rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); if (IS_ERR(rule->ctx)) { int err = PTR_ERR(rule->ctx); + kfree(rule); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 66ad599bd488..f74d2c834037 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -105,7 +105,7 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */ +/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */ #define KERNEL_NIC_PRIO_NUM_LEVELS 7 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ @@ -590,7 +590,7 @@ static void del_sw_fte(struct fs_node *node) &fte->hash, rhash_fte); WARN_ON(err); - ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); + ida_free(&fg->fte_allocator, fte->index - fg->start_index); kmem_cache_free(steering->ftes_cache, fte); } @@ -640,7 +640,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) int index; int ret; - index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL); + index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL); if (index < 0) return index; @@ -656,7 +656,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) return 0; err_ida_remove: - ida_simple_remove(&fg->fte_allocator, index); + ida_free(&fg->fte_allocator, index); return ret; } @@ -2229,17 +2229,21 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d { struct mlx5_flow_steering *steering = dev->priv.steering; - if (!steering || vport >= mlx5_eswitch_get_total_vports(dev)) + if (!steering) return NULL; switch (type) { case MLX5_FLOW_NAMESPACE_ESW_EGRESS: + if (vport >= steering->esw_egress_acl_vports) + return NULL; if (steering->esw_egress_root_ns && steering->esw_egress_root_ns[vport]) return &steering->esw_egress_root_ns[vport]->ns; else return NULL; 
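Note on the allocator conversion in the fpga/ipsec.c and fs_core.c hunks above: the deprecated ida_simple_get()/ida_simple_remove() calls are switched to ida_alloc_min()/ida_alloc_max()/ida_free(). The sketch below is purely illustrative and not part of the patch (example_ida and the helper names are hypothetical); the point it shows is that ida_simple_get() took an exclusive upper bound while ida_alloc_max() and ida_alloc_range() take an inclusive maximum, which is why the converted calls pass "max - 1".

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_alloc_id(unsigned int max_entries)
    {
    	/* Old style: ida_simple_get(&example_ida, 0, max_entries, GFP_KERNEL);
    	 * upper bound was exclusive, so [0, max_entries) was usable.
    	 */
    	return ida_alloc_max(&example_ida, max_entries - 1, GFP_KERNEL);
    }

    static void example_free_id(int id)
    {
    	/* Old style: ida_simple_remove(&example_ida, id); */
    	ida_free(&example_ida, id);
    }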
case MLX5_FLOW_NAMESPACE_ESW_INGRESS: + if (vport >= steering->esw_ingress_acl_vports) + return NULL; if (steering->esw_ingress_root_ns && steering->esw_ingress_root_ns[vport]) return &steering->esw_ingress_root_ns[vport]->ns; @@ -2395,14 +2399,12 @@ static int init_root_tree(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node) { - int i; - struct mlx5_flow_namespace *fs_ns; int err; + int i; - fs_get_obj(fs_ns, fs_parent_node); for (i = 0; i < init_node->ar_size; i++) { err = init_root_tree_recursive(steering, &init_node->children[i], - &fs_ns->node, + fs_parent_node, init_node, i); if (err) return err; @@ -2573,43 +2575,11 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns) clean_tree(&root_ns->ns.node); } -static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) -{ - struct mlx5_flow_steering *steering = dev->priv.steering; - int i; - - if (!steering->esw_egress_root_ns) - return; - - for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++) - cleanup_root_ns(steering->esw_egress_root_ns[i]); - - kfree(steering->esw_egress_root_ns); - steering->esw_egress_root_ns = NULL; -} - -static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) -{ - struct mlx5_flow_steering *steering = dev->priv.steering; - int i; - - if (!steering->esw_ingress_root_ns) - return; - - for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++) - cleanup_root_ns(steering->esw_ingress_root_ns[i]); - - kfree(steering->esw_ingress_root_ns); - steering->esw_ingress_root_ns = NULL; -} - void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering = dev->priv.steering; cleanup_root_ns(steering->root_ns); - cleanup_egress_acls_root_ns(dev); - cleanup_ingress_acls_root_ns(dev); cleanup_root_ns(steering->fdb_root_ns); steering->fdb_root_ns = NULL; kfree(steering->fdb_sub_ns); @@ -2854,10 +2824,9 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo return PTR_ERR_OR_ZERO(prio); } -static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) +int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports) { struct mlx5_flow_steering *steering = dev->priv.steering; - int total_vports = mlx5_eswitch_get_total_vports(dev); int err; int i; @@ -2873,7 +2842,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) if (err) goto cleanup_root_ns; } - + steering->esw_egress_acl_vports = total_vports; return 0; cleanup_root_ns: @@ -2884,10 +2853,24 @@ cleanup_root_ns: return err; } -static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) +void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int i; + + if (!steering->esw_egress_root_ns) + return; + + for (i = 0; i < steering->esw_egress_acl_vports; i++) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + + kfree(steering->esw_egress_root_ns); + steering->esw_egress_root_ns = NULL; +} + +int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports) { struct mlx5_flow_steering *steering = dev->priv.steering; - int total_vports = mlx5_eswitch_get_total_vports(dev); int err; int i; @@ -2903,7 +2886,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) if (err) goto cleanup_root_ns; } - + steering->esw_ingress_acl_vports = total_vports; return 0; cleanup_root_ns: @@ -2914,6 +2897,21 @@ cleanup_root_ns: return err; } +void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = 
dev->priv.steering; + int i; + + if (!steering->esw_ingress_root_ns) + return; + + for (i = 0; i < steering->esw_ingress_acl_vports; i++) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + + kfree(steering->esw_ingress_root_ns); + steering->esw_ingress_root_ns = NULL; +} + static int init_egress_root_ns(struct mlx5_flow_steering *steering) { int err; @@ -2976,16 +2974,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) if (err) goto err; } - if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { - err = init_egress_acls_root_ns(dev); - if (err) - goto err; - } - if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { - err = init_ingress_acls_root_ns(dev); - if (err) - goto err; - } } if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index b24a9849c45e..e577a2c424af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -129,6 +129,8 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *rdma_rx_root_ns; struct mlx5_flow_root_namespace *rdma_tx_root_ns; struct mlx5_flow_root_namespace *egress_root_ns; + int esw_egress_acl_vports; + int esw_ingress_acl_vports; }; struct fs_node { @@ -287,6 +289,11 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); +int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports); +void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev); +int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports); +void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev); + #define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } #define fs_list_for_each_entry(pos, root) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index f43caefd07a1..18e5aec14641 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -497,13 +497,13 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; - bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), - GFP_KERNEL); + bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), + GFP_KERNEL); if (!bulk) goto err_alloc_bulk; - bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), - GFP_KERNEL); + bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); if (!bulk->bitmask) goto err_alloc_bitmask; @@ -521,9 +521,9 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) return bulk; err_mlx5_cmd_bulk_alloc: - kfree(bulk->bitmask); + kvfree(bulk->bitmask); err_alloc_bitmask: - kfree(bulk); + kvfree(bulk); err_alloc_bulk: return ERR_PTR(err); } @@ -537,8 +537,8 @@ mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) } mlx5_cmd_fc_free(dev, bulk->base_id); - kfree(bulk->bitmask); - kfree(bulk); + kvfree(bulk->bitmask); + kvfree(bulk); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index f9042e147c7f..d5d57630015f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -104,7 +104,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { complete(&fw_reset->done); } else { - mlx5_load_one(dev, false); + mlx5_load_one(dev); devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); @@ -119,7 +119,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) int err; mlx5_enter_error_state(dev, true); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); err = mlx5_health_wait_pci_up(dev); if (err) mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); @@ -199,16 +199,11 @@ static void mlx5_fw_live_patch_event(struct work_struct *work) struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, fw_live_patch_work); struct mlx5_core_dev *dev = fw_reset->dev; - struct mlx5_fw_tracer *tracer; mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); - tracer = dev->tracer; - if (IS_ERR_OR_NULL(tracer)) - return; - - if (mlx5_fw_tracer_reload(tracer)) + if (mlx5_fw_tracer_reload(dev->tracer)) mlx5_core_err(dev, "Failed to reload FW tracer\n"); } @@ -342,7 +337,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) } mlx5_enter_error_state(dev, true); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); done: fw_reset->ret = err; mlx5_fw_reset_complete_reload(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 0c32c485eb58..9ff163c5bcde 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -335,12 +335,12 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev) return -EIO; } mlx5_core_err(dev, "starting health recovery flow\n"); - mlx5_recover_device(dev); - if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) || - mlx5_health_check_fatal_sensors(dev)) { + if (mlx5_recover_device(dev) || mlx5_health_check_fatal_sensors(dev)) { mlx5_core_err(dev, "health recovery failed\n"); return -EIO; } + + mlx5_core_info(dev, "health recovery succeeded\n"); return 0; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 6f7cef47e04c..612a7f69366d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -33,6 +33,7 @@ #include <rdma/ib_verbs.h> #include <linux/mlx5/fs.h> #include "en.h" +#include "en/params.h" #include "ipoib.h" #define IB_DEFAULT_Q_KEY 0xb1b @@ -372,6 +373,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + u16 max_nch = priv->max_nch; int err; mlx5e_create_q_counters(priv); @@ -386,7 +388,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir); + err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); if (err) goto err_destroy_indirect_rqts; @@ -394,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_direct_rqts; - err = mlx5e_create_direct_tirs(priv, priv->direct_tir); + err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); if (err) goto err_destroy_indirect_tirs; @@ -405,11 +407,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) return 0; err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); err_destroy_indirect_tirs: mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); err_close_drop_rq: @@ -421,10 +423,12 @@ err_destroy_q_counters: static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { + u16 max_nch = priv->max_nch; + mlx5i_destroy_flow_steering(priv); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); @@ -469,6 +473,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5i_stats_grps, .stats_grps_num = mlx5i_stats_grps_num, + .rx_ptp_support = false, }; /* mlx5i netdev NDos */ @@ -476,28 +481,19 @@ static const struct mlx5e_profile mlx5i_nic_profile = { static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - struct mlx5e_channels new_channels = {}; - struct mlx5e_params *params; + struct mlx5e_params new_params; int err = 0; mutex_lock(&priv->state_lock); - params = &priv->channels.params; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - params->sw_mtu = new_mtu; - netdev->mtu = params->sw_mtu; - goto out; - } - - new_channels.params = *params; - new_channels.params.sw_mtu = new_mtu; + new_params = priv->channels.params; + new_params.sw_mtu = new_mtu; - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); if (err) goto out; - netdev->mtu = new_channels.params.sw_mtu; + netdev->mtu = new_params.sw_mtu; out: mutex_unlock(&priv->state_lock); @@ -710,7 +706,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev) static 
bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev) { - return mdev->mlx5e_res.pdn != 0; + return mdev->mlx5e_res.hw_objs.pdn != 0; } static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 3d0a18a0bed4..18ee21b06a00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -350,6 +350,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), + .rx_ptp_support = false, }; const struct mlx5e_profile *mlx5i_pkey_get_profile(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 83a05371e2aa..b8748390335f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -603,8 +603,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) if (err) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); - - return; } /* Must be called with intf_mutex held */ @@ -768,7 +766,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, spin_lock(&lag_lock); ldev = mlx5_lag_dev_get(dev); - if (ldev && __mlx5_lag_is_roce(ldev)) { + if (ldev && __mlx5_lag_is_active(ldev)) { num_ports = MLX5_MAX_PORTS; mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev; mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 88e58ac902de..2c41a6920264 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -35,7 +35,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) } /** - * Set lag port affinity + * mlx5_lag_set_port_affinity * * @ldev: lag device * @port: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 1e7f26b240de..ce696d523493 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -645,16 +645,19 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) return PTP_PF_NONE; } -static int mlx5_init_pin_config(struct mlx5_clock *clock) +static void mlx5_init_pin_config(struct mlx5_clock *clock) { int i; + if (!clock->ptp_info.n_pins) + return; + clock->ptp_info.pin_config = kcalloc(clock->ptp_info.n_pins, sizeof(*clock->ptp_info.pin_config), GFP_KERNEL); if (!clock->ptp_info.pin_config) - return -ENOMEM; + return; clock->ptp_info.enable = mlx5_ptp_enable; clock->ptp_info.verify = mlx5_ptp_verify; clock->ptp_info.pps = 1; @@ -667,8 +670,6 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock) clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); clock->ptp_info.pin_config[i].chan = 0; } - - return 0; } static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev) @@ -859,6 +860,17 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) } } +static void mlx5_init_pps(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + + if (!MLX5_PPS_CAP(mdev)) + return; + + mlx5_get_pps_caps(mdev); + mlx5_init_pin_config(clock); +} + void mlx5_init_clock(struct mlx5_core_dev *mdev) { struct mlx5_clock *clock = &mdev->clock; @@ -876,10 +888,7 @@ void 
mlx5_init_clock(struct mlx5_core_dev *mdev) clock->ptp_info = mlx5_ptp_clock_info; /* Initialize 1PPS data structures */ - if (MLX5_PPS_CAP(mdev)) - mlx5_get_pps_caps(mdev); - if (clock->ptp_info.n_pins) - mlx5_init_pin_config(clock); + mlx5_init_pps(mdev); clock->ptp = ptp_clock_register(&clock->ptp_info, &mdev->pdev->dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index a12c7da618a7..ceae6bc378e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -105,4 +105,15 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock, } #endif +static inline cqe_ts_to_ns mlx5_rq_ts_translator(struct mlx5_core_dev *mdev) +{ + return mlx5_is_real_time_rq(mdev) ? mlx5_real_time_cyc2time : + mlx5_timecounter_cyc2time; +} + +static inline cqe_ts_to_ns mlx5_sq_ts_translator(struct mlx5_core_dev *mdev) +{ + return mlx5_is_real_time_sq(mdev) ? mlx5_real_time_cyc2time : + mlx5_timecounter_cyc2time; +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index 57eb91bcbca7..e995f8378df7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -46,7 +46,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY); - MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn); + MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index 81f2cc4ca1da..f607a3858ef5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -22,15 +22,15 @@ struct mlx5_cq_table { }; struct mlx5_eq { + struct mlx5_frag_buf_ctrl fbc; + struct mlx5_frag_buf frag_buf; struct mlx5_core_dev *dev; struct mlx5_cq_table cq_table; __be32 __iomem *doorbell; u32 cons_index; - struct mlx5_frag_buf buf; unsigned int vecidx; unsigned int irqn; u8 eqn; - int nent; struct mlx5_rsc_debug *dbg; }; @@ -47,16 +47,21 @@ struct mlx5_eq_comp { struct list_head list; }; +static inline u32 eq_get_size(struct mlx5_eq *eq) +{ + return eq->fbc.sz_m1 + 1; +} + static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) { - return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); + return mlx5_frag_buf_get_wqe(&eq->fbc, entry); } static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) { - struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1); - return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; + return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? 
NULL : eqe; } static inline void eq_update_ci(struct mlx5_eq *eq, int arm) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index 381325b4a863..00ef10a1a9f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -7,15 +7,11 @@ #include "lib/fs_chains.h" #include "en/mapping.h" -#include "mlx5_core.h" #include "fs_core.h" -#include "eswitch.h" -#include "en.h" #include "en_tc.h" #define chains_lock(chains) ((chains)->lock) #define chains_ht(chains) ((chains)->chains_ht) -#define chains_mapping(chains) ((chains)->chains_mapping) #define prios_ht(chains) ((chains)->prios_ht) #define ft_pool_left(chains) ((chains)->ft_left) #define tc_default_ft(chains) ((chains)->tc_default_ft) @@ -300,7 +296,7 @@ create_chain_restore(struct fs_chain *chain) !mlx5_chains_prios_supported(chains)) return 0; - err = mapping_add(chains_mapping(chains), &chain->chain, &index); + err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index); if (err) return err; if (index == MLX5_FS_DEFAULT_FLOW_TAG) { @@ -310,10 +306,8 @@ create_chain_restore(struct fs_chain *chain) * * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0. */ - err = mapping_add(chains_mapping(chains), - &chain->chain, &index); - mapping_remove(chains_mapping(chains), - MLX5_FS_DEFAULT_FLOW_TAG); + err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index); + mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG); if (err) return err; } @@ -361,7 +355,7 @@ err_mod_hdr: mlx5_del_flow_rules(chain->restore_rule); err_rule: /* Datapath can't find this mapping, so we can safely remove it */ - mapping_remove(chains_mapping(chains), chain->id); + mapping_remove(chains->chains_mapping, chain->id); return err; } @@ -376,7 +370,7 @@ static void destroy_chain_restore(struct fs_chain *chain) mlx5_del_flow_rules(chain->restore_rule); mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr); - mapping_remove(chains_mapping(chains), chain->id); + mapping_remove(chains->chains_mapping, chain->id); } static struct fs_chain * @@ -797,7 +791,6 @@ static struct mlx5_fs_chains * mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr) { struct mlx5_fs_chains *chains_priv; - struct mapping_ctx *mapping; u32 max_flow_counter; int err; @@ -816,6 +809,7 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr) chains_priv->flags = attr->flags; chains_priv->ns = attr->ns; chains_priv->group_num = attr->max_grp_num; + chains_priv->chains_mapping = attr->mapping; tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft; mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n", @@ -832,20 +826,10 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr) if (err) goto init_prios_ht_err; - mapping = mapping_create(sizeof(u32), attr->max_restore_tag, - true); - if (IS_ERR(mapping)) { - err = PTR_ERR(mapping); - goto mapping_err; - } - chains_mapping(chains_priv) = mapping; - mutex_init(&chains_lock(chains_priv)); return chains_priv; -mapping_err: - rhashtable_destroy(&prios_ht(chains_priv)); init_prios_ht_err: rhashtable_destroy(&chains_ht(chains_priv)); init_chains_ht_err: @@ -857,7 +841,6 @@ static void mlx5_chains_cleanup(struct mlx5_fs_chains *chains) { mutex_destroy(&chains_lock(chains)); - mapping_destroy(chains_mapping(chains)); rhashtable_destroy(&prios_ht(chains)); 
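Note on the next_eqe_sw() change in lib/eq.h above: the owner-bit test now derives the ring geometry from the fragment-buffer control fields (fbc.sz_m1 and fbc.log_sz) instead of the removed nent field. The following is a minimal illustrative sketch of that check, assuming a power-of-two ring of 1 << log_sz entries (the helper name is hypothetical, not in the patch): hardware flips the EQE owner bit on each full pass over the ring, so an entry is treated as new only when its owner bit matches the parity of the consumer index's wrap count.

    static bool example_eqe_is_new(u32 cons_index, u8 log_sz, u8 eqe_owner)
    {
    	/* Number of times the consumer index has wrapped the ring, mod 2. */
    	u32 wrap_parity = (cons_index >> log_sz) & 1;

    	/* The entry belongs to software when the owner bit matches that parity. */
    	return (eqe_owner & 1) == wrap_parity;
    }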
rhashtable_destroy(&chains_ht(chains)); @@ -884,25 +867,18 @@ int mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain, u32 *chain_mapping) { - return mapping_add(chains_mapping(chains), &chain, chain_mapping); + struct mapping_ctx *ctx = chains->chains_mapping; + struct mlx5_mapped_obj mapped_obj = {}; + + mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN; + mapped_obj.chain = chain; + return mapping_add(ctx, &mapped_obj, chain_mapping); } int mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping) { - return mapping_remove(chains_mapping(chains), chain_mapping); -} - -int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, - u32 *chain) -{ - int err; + struct mapping_ctx *ctx = chains->chains_mapping; - err = mapping_find(chains_mapping(chains), tag, chain); - if (err) { - mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag); - return -ENOENT; - } - - return 0; + return mapping_remove(ctx, chain_mapping); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h index 6d5be31b05dd..e96f345e7dae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h @@ -7,6 +7,7 @@ #include <linux/mlx5/fs.h> struct mlx5_fs_chains; +struct mlx5_mapped_obj; enum mlx5_chains_flags { MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0), @@ -20,7 +21,7 @@ struct mlx5_chains_attr { u32 max_ft_sz; u32 max_grp_num; struct mlx5_flow_table *default_ft; - u32 max_restore_tag; + struct mapping_ctx *mapping; }; #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) @@ -63,9 +64,6 @@ struct mlx5_fs_chains * mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr); void mlx5_chains_destroy(struct mlx5_fs_chains *chains); -int -mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain); - void mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains, struct mlx5_flow_table *ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c index a68738c8f4bc..3f9869c7e326 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c @@ -55,10 +55,6 @@ void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev) int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count) { - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { - mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n"); - return -EPERM; - } if (dev->roce.reserved_gids.start < count) { mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n", count); @@ -79,7 +75,6 @@ int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count) void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count) { - WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up"); WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved", count, dev->roce.reserved_gids.count); @@ -93,12 +88,12 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count) int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index) { int end = dev->roce.reserved_gids.start + - dev->roce.reserved_gids.count; + dev->roce.reserved_gids.count - 1; int index = 0; - index = ida_simple_get(&dev->roce.reserved_gids.ida, - dev->roce.reserved_gids.start, end, - GFP_KERNEL); + index = 
ida_alloc_range(&dev->roce.reserved_gids.ida, + dev->roce.reserved_gids.start, end, + GFP_KERNEL); if (index < 0) return index; @@ -110,7 +105,7 @@ int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index) void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index) { mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index); - ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index); + ida_free(&dev->roce.reserved_gids.ida, gid_index); } unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index d046db7bb047..2f536c5d30b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -95,4 +95,13 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) return devlink_net(priv_to_devlink(dev)); } +static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev) +{ + mdev->mlx5e_res.uplink_netdev = netdev; +} + +static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev) +{ + return mdev->mlx5e_res.uplink_netdev; +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c568896cfb23..c114365eb126 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -571,6 +571,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) mlx5_vhca_state_cap_handle(dev, set_hca_cap); + if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix)) + MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix, + MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix)); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } @@ -1235,7 +1239,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_put_uars_page(dev, dev->priv.uar); } -int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) +int mlx5_init_one(struct mlx5_core_dev *dev) { int err = 0; @@ -1247,16 +1251,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) /* remove any previous indication of internal error */ dev->state = MLX5_DEVICE_STATE_UP; - err = mlx5_function_setup(dev, boot); + err = mlx5_function_setup(dev, true); if (err) goto err_function; - if (boot) { - err = mlx5_init_once(dev); - if (err) { - mlx5_core_err(dev, "sw objs init failed\n"); - goto function_teardown; - } + err = mlx5_init_once(dev); + if (err) { + mlx5_core_err(dev, "sw objs init failed\n"); + goto function_teardown; } err = mlx5_load(dev); @@ -1265,16 +1267,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); - if (boot) { - err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); - if (err) - goto err_devlink_reg; - - err = mlx5_register_device(dev); - } else { - err = mlx5_attach_device(dev); - } + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + err = mlx5_register_device(dev); if (err) goto err_register; @@ -1282,16 +1279,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) return 0; err_register: - if (boot) - mlx5_devlink_unregister(priv_to_devlink(dev)); + mlx5_devlink_unregister(priv_to_devlink(dev)); err_devlink_reg: clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); err_load: - if (boot) - mlx5_cleanup_once(dev); + mlx5_cleanup_once(dev); function_teardown: - 
mlx5_function_teardown(dev, boot); + mlx5_function_teardown(dev, true); err_function: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; out: @@ -1299,33 +1294,84 @@ out: return err; } -void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) +void mlx5_uninit_one(struct mlx5_core_dev *dev) { mutex_lock(&dev->intf_state_mutex); - if (cleanup) { - mlx5_unregister_device(dev); - mlx5_devlink_unregister(priv_to_devlink(dev)); - } else { - mlx5_detach_device(dev); - } + mlx5_unregister_device(dev); + mlx5_devlink_unregister(priv_to_devlink(dev)); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); - if (cleanup) - mlx5_cleanup_once(dev); + mlx5_cleanup_once(dev); goto out; } clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + mlx5_unload(dev); + mlx5_cleanup_once(dev); + mlx5_function_teardown(dev, true); +out: + mutex_unlock(&dev->intf_state_mutex); +} +int mlx5_load_one(struct mlx5_core_dev *dev) +{ + int err = 0; + + mutex_lock(&dev->intf_state_mutex); + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { + mlx5_core_warn(dev, "interface is up, NOP\n"); + goto out; + } + /* remove any previous indication of internal error */ + dev->state = MLX5_DEVICE_STATE_UP; + + err = mlx5_function_setup(dev, false); + if (err) + goto err_function; + + err = mlx5_load(dev); + if (err) + goto err_load; + + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + + err = mlx5_attach_device(dev); + if (err) + goto err_attach; + + mutex_unlock(&dev->intf_state_mutex); + return 0; + +err_attach: + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); +err_load: + mlx5_function_teardown(dev, false); +err_function: + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; +out: + mutex_unlock(&dev->intf_state_mutex); + return err; +} - if (cleanup) - mlx5_cleanup_once(dev); +void mlx5_unload_one(struct mlx5_core_dev *dev) +{ + mutex_lock(&dev->intf_state_mutex); - mlx5_function_teardown(dev, cleanup); + mlx5_detach_device(dev); + + if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { + mlx5_core_warn(dev, "%s: interface is down, NOP\n", + __func__); + goto out; + } + + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + mlx5_unload(dev); + mlx5_function_teardown(dev, false); out: mutex_unlock(&dev->intf_state_mutex); } @@ -1397,7 +1443,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mutex_destroy(&dev->intf_state_mutex); } -static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) +static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; struct devlink *devlink; @@ -1433,11 +1479,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) goto pci_init_err; } - err = mlx5_load_one(dev, true); + err = mlx5_init_one(dev); if (err) { - mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n", + mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n", err); - goto err_load_one; + goto err_init_one; } err = mlx5_crdump_enable(dev); @@ -1449,7 +1495,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) devlink_reload_enable(devlink); return 0; -err_load_one: +err_init_one: mlx5_pci_close(dev); pci_init_err: mlx5_mdev_uninit(dev); @@ -1469,7 +1515,7 @@ static void remove_one(struct pci_dev *pdev) devlink_reload_disable(devlink); mlx5_crdump_disable(dev); mlx5_drain_health_wq(dev); - mlx5_unload_one(dev, true); + mlx5_uninit_one(dev); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); 
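Note on the main.c lifecycle split above: mlx5_load_one(dev, boot) is separated into mlx5_init_one()/mlx5_uninit_one(), used by the probe and remove paths (which also create and destroy the software objects), and a lighter mlx5_load_one()/mlx5_unload_one() pair used by PM and error-recovery paths, which keeps software state intact across the cycle. A minimal illustrative sketch of the intended pairing follows; the wrapper function is hypothetical and not part of the patch.

    /* Suspend/resume-style cycle: hardware is torn down and reloaded, while
     * the software objects created by mlx5_init_one() are preserved.
     */
    static int example_pm_cycle(struct mlx5_core_dev *dev)
    {
    	mlx5_unload_one(dev);
    	return mlx5_load_one(dev);
    }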
mlx5_adev_idx_free(dev->priv.adev_idx); @@ -1485,7 +1531,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_enter_error_state(dev, false); mlx5_error_sw_reset(dev); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); @@ -1555,7 +1601,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev) mlx5_core_info(dev, "%s was called\n", __func__); - err = mlx5_load_one(dev, false); + err = mlx5_load_one(dev); if (err) mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n", __func__, err); @@ -1627,7 +1673,7 @@ static void shutdown(struct pci_dev *pdev) mlx5_core_info(dev, "Shutdown was called\n"); err = mlx5_try_fast_unload(dev); if (err) - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); mlx5_pci_disable_device(dev); } @@ -1635,7 +1681,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); return 0; } @@ -1644,7 +1690,7 @@ static int mlx5_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - return mlx5_load_one(dev, false); + return mlx5_load_one(dev); } static const struct pci_device_id mlx5_core_pci_table[] = { @@ -1676,26 +1722,31 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); void mlx5_disable_device(struct mlx5_core_dev *dev) { mlx5_error_sw_reset(dev); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); } -void mlx5_recover_device(struct mlx5_core_dev *dev) +int mlx5_recover_device(struct mlx5_core_dev *dev) { + int ret = -EIO; + mlx5_pci_disable_device(dev); if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) - mlx5_pci_resume(dev->pdev); + ret = mlx5_load_one(dev); + return ret; } static struct pci_driver mlx5_core_driver = { .name = KBUILD_MODNAME, .id_table = mlx5_core_pci_table, - .probe = init_one, + .probe = probe_one, .remove = remove_one, .suspend = mlx5_suspend, .resume = mlx5_resume, .shutdown = shutdown, .err_handler = &mlx5_err_handler, .sriov_configure = mlx5_core_sriov_configure, + .sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix, + .sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count, }; static void mlx5_core_verify_params(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index efe403c7e354..a22b706eebd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -134,12 +134,13 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev); u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev); int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); -void mlx5_recover_device(struct mlx5_core_dev *dev); +int mlx5_recover_device(struct mlx5_core_dev *dev); int mlx5_sriov_init(struct mlx5_core_dev *dev); void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); int mlx5_sriov_attach(struct mlx5_core_dev *dev); void mlx5_sriov_detach(struct mlx5_core_dev *dev); int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); +int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, @@ -174,6 +175,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx, 
struct notifier_block *nb); int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx, struct notifier_block *nb); + +int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn, + int msix_vec_count); +int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs); + struct cpumask * mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx); struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table); @@ -267,10 +273,18 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev) int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx); void mlx5_mdev_uninit(struct mlx5_core_dev *dev); -void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup); -int mlx5_load_one(struct mlx5_core_dev *dev, bool boot); +int mlx5_init_one(struct mlx5_core_dev *dev); +void mlx5_uninit_one(struct mlx5_core_dev *dev); +void mlx5_unload_one(struct mlx5_core_dev *dev); +int mlx5_load_one(struct mlx5_core_dev *dev); int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out); void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work); +static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); +} #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index c0656d4782e1..110c0837f95b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -61,7 +61,7 @@ struct fw_page { u32 function; unsigned long bitmask; struct list_head list; - unsigned free_count; + unsigned int free_count; }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index a61e09aff152..1f907df5b3a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -61,6 +61,79 @@ static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx) return &irq_table->irq[vecidx]; } +/** + * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors + * to be ssigned to each VF. + * @dev: PF to work on + * @num_vfs: Number of enabled VFs + */ +int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs) +{ + int num_vf_msix, min_msix, max_msix; + + num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); + if (!num_vf_msix) + return 0; + + min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size); + max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size); + + /* Limit maximum number of MSI-X vectors so the default configuration + * has some available in the pool. This will allow the user to increase + * the number of vectors in a VF without having to first size-down other + * VFs. 
+ */ + return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix); +} + +/** + * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF + * @dev: PF to work on + * @function_id: Internal PCI VF function IDd + * @msix_vec_count: Number of MSI-X vectors to set + */ +int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id, + int msix_vec_count) +{ + int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + int num_vf_msix, min_msix, max_msix; + void *hca_cap, *cap; + int ret; + + num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); + if (!num_vf_msix) + return 0; + + if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev)) + return -EOPNOTSUPP; + + min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size); + max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size); + + if (msix_vec_count < min_msix) + return -EINVAL; + + if (msix_vec_count > max_msix) + return -EOVERFLOW; + + hca_cap = kzalloc(sz, GFP_KERNEL); + if (!hca_cap) + return -ENOMEM; + + cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability); + MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count); + + MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP); + MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1); + MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id); + + MLX5_SET(set_hca_cap_in, hca_cap, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1); + ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap); + kfree(hca_cap); + return ret; +} + int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx, struct notifier_block *nb) { @@ -94,7 +167,6 @@ static void irq_set_name(char *name, int vecidx) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx - MLX5_IRQ_VEC_COMP_BASE); - return; } static int request_irqs(struct mlx5_core_dev *dev, int nvec) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 4bb219565c58..1ef2b6a848c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -353,69 +353,123 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset *offset -= MLX5_EEPROM_PAGE_LENGTH; } -int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, - u16 offset, u16 size, u8 *data) +static int mlx5_query_mcia(struct mlx5_core_dev *dev, + struct mlx5_module_eeprom_query_params *params, u8 *data) { - int module_num, status, err, page_num = 0; u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - u16 i2c_addr = 0; - u8 module_id; + int status, err; void *ptr; + u16 size; - err = mlx5_query_module_num(dev, &module_num); + size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES); + + MLX5_SET(mcia_reg, in, l, 0); + MLX5_SET(mcia_reg, in, size, size); + MLX5_SET(mcia_reg, in, module, params->module_number); + MLX5_SET(mcia_reg, in, device_address, params->offset); + MLX5_SET(mcia_reg, in, page_number, params->page); + MLX5_SET(mcia_reg, in, i2c_device_address, params->i2c_address); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); if (err) return err; - err = mlx5_query_module_id(dev, module_num, &module_id); + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + memcpy(data, ptr, size); + + return size; +} + +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 
*data) +{ + struct mlx5_module_eeprom_query_params query = {0}; + u8 module_id; + int err; + + err = mlx5_query_module_num(dev, &query.module_number); + if (err) + return err; + + err = mlx5_query_module_id(dev, query.module_number, &module_id); if (err) return err; switch (module_id) { case MLX5_MODULE_ID_SFP: - mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); break; case MLX5_MODULE_ID_QSFP: case MLX5_MODULE_ID_QSFP_PLUS: case MLX5_MODULE_ID_QSFP28: - mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); break; default: mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); return -EINVAL; } - if (offset + size > MLX5_EEPROM_PAGE_LENGTH) + if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); + query.size = size; - MLX5_SET(mcia_reg, in, l, 0); - MLX5_SET(mcia_reg, in, module, module_num); - MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr); - MLX5_SET(mcia_reg, in, page_number, page_num); - MLX5_SET(mcia_reg, in, device_address, offset); - MLX5_SET(mcia_reg, in, size, size); + return mlx5_query_mcia(dev, &query, data); +} +EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, - sizeof(out), MLX5_REG_MCIA, 0, 0); +int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, + struct mlx5_module_eeprom_query_params *params, + u8 *data) +{ + u8 module_id; + int err; + + err = mlx5_query_module_num(dev, &params->module_number); if (err) return err; - status = MLX5_GET(mcia_reg, out, status); - if (status) { - mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", - status); - return -EIO; + err = mlx5_query_module_id(dev, params->module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case MLX5_MODULE_ID_SFP: + if (params->page > 0) + return -EINVAL; + break; + case MLX5_MODULE_ID_QSFP: + case MLX5_MODULE_ID_QSFP28: + case MLX5_MODULE_ID_QSFP_PLUS: + if (params->page > 3) + return -EINVAL; + break; + case MLX5_MODULE_ID_DSFP: + break; + default: + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; } - ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); - memcpy(data, ptr, size); + if (params->i2c_address != MLX5_I2C_ADDR_HIGH && + params->i2c_address != MLX5_I2C_ADDR_LOW) { + mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address); + return -EINVAL; + } - return size; + return mlx5_query_mcia(dev, params, data); } -EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); +EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page); static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int pvlc_size, u8 local_port) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 8e0dddc6383f..441b5453acae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -180,5 +180,4 @@ del_roce_addr: mlx5_rdma_del_roce_addr(dev); disable_roce: mlx5_nic_vport_disable_roce(dev); - return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index 99039c47ef33..7161220afe30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -117,6 
+117,9 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, bool empty_found = false; int i; + lockdep_assert_held(&table->rl_lock); + WARN_ON(!table->rl_entry); + for (i = 0; i < table->max_size; i++) { if (dedicated) { if (!table->rl_entry[i].refcount) @@ -172,38 +175,103 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, } EXPORT_SYMBOL(mlx5_rl_are_equal); +static int mlx5_rl_table_get(struct mlx5_rl_table *table) +{ + int i; + + lockdep_assert_held(&table->rl_lock); + + if (table->rl_entry) { + table->refcount++; + return 0; + } + + table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry), + GFP_KERNEL); + if (!table->rl_entry) + return -ENOMEM; + + /* The index represents the index in HW rate limit table + * Index 0 is reserved for unlimited rate + */ + for (i = 0; i < table->max_size; i++) + table->rl_entry[i].index = i + 1; + + table->refcount++; + return 0; +} + +static void mlx5_rl_table_put(struct mlx5_rl_table *table) +{ + lockdep_assert_held(&table->rl_lock); + if (--table->refcount) + return; + + kfree(table->rl_entry); + table->rl_entry = NULL; +} + +static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table) +{ + int i; + + if (!table->rl_entry) + return; + + /* Clear all configured rates */ + for (i = 0; i < table->max_size; i++) + if (table->rl_entry[i].refcount) + mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false); + kfree(table->rl_entry); +} + +static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry) +{ + entry->refcount++; +} + +static void +mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry) +{ + entry->refcount--; + if (!entry->refcount) + mlx5_set_pp_rate_limit_cmd(dev, entry, false); +} + int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, bool dedicated_entry, u16 *index) { struct mlx5_rl_table *table = &dev->priv.rl_table; struct mlx5_rl_entry *entry; - int err = 0; u32 rate; + int err; - rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit); - mutex_lock(&table->rl_lock); + if (!table->max_size) + return -EOPNOTSUPP; + rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit); if (!rate || !mlx5_rl_is_in_range(dev, rate)) { mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n", rate, table->min_rate, table->max_rate); - err = -EINVAL; - goto out; + return -EINVAL; } + mutex_lock(&table->rl_lock); + err = mlx5_rl_table_get(table); + if (err) + goto out; + entry = find_rl_entry(table, rl_in, uid, dedicated_entry); if (!entry) { mlx5_core_err(dev, "Max number of %u rates reached\n", table->max_size); err = -ENOSPC; - goto out; + goto rl_err; } - if (entry->refcount) { - /* rate already configured */ - entry->refcount++; - } else { + if (!entry->refcount) { + /* new rate limit */ memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw)); entry->uid = uid; - /* new rate limit */ err = mlx5_set_pp_rate_limit_cmd(dev, entry, true); if (err) { mlx5_core_err( @@ -214,14 +282,18 @@ int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, burst_upper_bound), MLX5_GET(set_pp_rate_limit_context, rl_in, typical_packet_size)); - goto out; + goto rl_err; } - entry->refcount = 1; entry->dedicated = dedicated_entry; } + mlx5_rl_entry_get(entry); *index = entry->index; + mutex_unlock(&table->rl_lock); + return 0; +rl_err: + mlx5_rl_table_put(table); out: mutex_unlock(&table->rl_lock); return err; @@ -235,10 +307,8 @@ void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index) mutex_lock(&table->rl_lock); entry = 
&table->rl_entry[index - 1]; - entry->refcount--; - if (!entry->refcount) - /* need to remove rate */ - mlx5_set_pp_rate_limit_cmd(dev, entry, false); + mlx5_rl_entry_put(dev, entry); + mlx5_rl_table_put(table); mutex_unlock(&table->rl_lock); } EXPORT_SYMBOL(mlx5_rl_remove_rate_raw); @@ -286,12 +356,8 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl) rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); goto out; } - - entry->refcount--; - if (!entry->refcount) - /* need to remove rate */ - mlx5_set_pp_rate_limit_cmd(dev, entry, false); - + mlx5_rl_entry_put(dev, entry); + mlx5_rl_table_put(table); out: mutex_unlock(&table->rl_lock); } @@ -300,31 +366,19 @@ EXPORT_SYMBOL(mlx5_rl_remove_rate); int mlx5_init_rl_table(struct mlx5_core_dev *dev) { struct mlx5_rl_table *table = &dev->priv.rl_table; - int i; - mutex_init(&table->rl_lock); if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) { table->max_size = 0; return 0; } + mutex_init(&table->rl_lock); + /* First entry is reserved for unlimited rate */ table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1; table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate); table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate); - table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry), - GFP_KERNEL); - if (!table->rl_entry) - return -ENOMEM; - - /* The index represents the index in HW rate limit table - * Index 0 is reserved for unlimited rate - */ - for (i = 0; i < table->max_size; i++) - table->rl_entry[i].index = i + 1; - - /* Index 0 is reserved */ mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n", table->max_size, table->min_rate >> 10, @@ -336,13 +390,10 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev) void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) { struct mlx5_rl_table *table = &dev->priv.rl_table; - int i; - /* Clear all configured rates */ - for (i = 0; i < table->max_size; i++) - if (table->rl_entry[i].refcount) - mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], - false); + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) + return; - kfree(dev->priv.rl_table.rl_entry); + mlx5_rl_table_free(dev, table); + mutex_destroy(&table->rl_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 90b524c59f3c..6a0c6f965ad1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb); const struct mlx5_vhca_state_event *event = data; struct mlx5_sf_dev *sf_dev; + u16 max_functions; u16 sf_index; + u16 base_id; + + max_functions = mlx5_sf_max_functions(table->dev); + if (!max_functions) + return 0; + + base_id = MLX5_CAP_GEN(table->dev, sf_base_id); + if (event->function_id < base_id || event->function_id >= (base_id + max_functions)) + return 0; - sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id); + sf_index = event->function_id - base_id; sf_dev = xa_load(&table->devices, sf_index); switch (event->new_vhca_state) { case MLX5_VHCA_STATE_ALLOCATED: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h index 4de02902aef1..149fd9e698cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -47,7 +47,7 @@ static inline void mlx5_sf_driver_unregister(void) static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev) { - return 0; + return false; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index c4bf555c25ea..42c8ee03fe3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -41,14 +41,15 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto remap_err; } - err = mlx5_load_one(mdev, true); + err = mlx5_init_one(mdev); if (err) { - mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err); - goto load_one_err; + mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); + goto init_one_err; } + devlink_reload_enable(devlink); return 0; -load_one_err: +init_one_err: iounmap(mdev->iseg); remap_err: mlx5_mdev_uninit(mdev); @@ -63,7 +64,8 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev) struct devlink *devlink; devlink = priv_to_devlink(sf_dev->mdev); - mlx5_unload_one(sf_dev->mdev, true); + devlink_reload_disable(devlink); + mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); mlx5_mdev_uninit(sf_dev->mdev); mlx5_devlink_free(devlink); @@ -73,7 +75,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - mlx5_unload_one(sf_dev->mdev, false); + mlx5_unload_one(sf_dev->mdev); } static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index c2ba41bb7a70..a8e73c9ed1ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -12,6 +12,7 @@ struct mlx5_sf { struct devlink_port dl_port; unsigned int port_index; + u32 controller; u16 id; u16 hw_fn_id; u16 hw_state; @@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) } static struct mlx5_sf * -mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack) +mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw, + u32 controller, u32 sfnum, struct netlink_ext_ack *extack) { unsigned int dl_port_index; struct mlx5_sf *sf; @@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex int id_err; int err; - id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum); + if (!mlx5_esw_offloads_controller_valid(esw, controller)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid controller number"); + return ERR_PTR(-EINVAL); + } + + id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum); if (id_err < 0) { err = id_err; goto id_err; @@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex goto alloc_err; } sf->id = id_err; - hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id); + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id); dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id); sf->port_index = dl_port_index; sf->hw_fn_id = hw_fn_id; sf->hw_state = MLX5_VHCA_STATE_ALLOCATED; + sf->controller = controller; err = mlx5_sf_id_insert(table, sf); if (err) @@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex insert_err: kfree(sf); alloc_err: - 
mlx5_sf_hw_table_sf_free(table->dev, id_err); + mlx5_sf_hw_table_sf_free(table->dev, controller, id_err); id_err: if (err == -EEXIST) NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum"); @@ -103,7 +111,7 @@ id_err: static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) { mlx5_sf_id_erase(table, sf); - mlx5_sf_hw_table_sf_free(table->dev, sf->id); + mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id); kfree(sf); } @@ -270,15 +278,14 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_sf *sf; - u16 hw_fn_id; int err; - sf = mlx5_sf_alloc(table, new_attr->sfnum, extack); + sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack); if (IS_ERR(sf)) return PTR_ERR(sf); - hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id); - err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum); + err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id, + new_attr->controller, new_attr->sfnum); if (err) goto esw_err; *new_port_index = sf->port_index; @@ -307,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ "User must provide unique sfnum. Driver does not support auto assignment"); return -EOPNOTSUPP; } - if (new_attr->controller_valid && new_attr->controller) { + if (new_attr->controller_valid && new_attr->controller && + !mlx5_core_is_ecpf_esw_manager(dev)) { NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported"); return -EOPNOTSUPP; } @@ -353,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) * firmware gives confirmation that it is detached by the driver. */ mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id); - mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); + mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id); kfree(sf); } else { - mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); + mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id); kfree(sf); } } @@ -438,9 +446,6 @@ sf_err: static void mlx5_sf_table_enable(struct mlx5_sf_table *table) { - if (!mlx5_sf_max_functions(table->dev)) - return; - init_completion(&table->disable_complete); refcount_set(&table->refcount, 1); } @@ -463,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) static void mlx5_sf_table_disable(struct mlx5_sf_table *table) { - if (!mlx5_sf_max_functions(table->dev)) - return; - if (!refcount_read(&table->refcount)) return; @@ -492,14 +494,15 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi break; default: break; - }; + } return 0; } static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) { - return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev); + return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && + mlx5_sf_hw_table_supported(dev); } int mlx5_sf_table_init(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index a5a0f60bef66..ef5f892aafad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -5,8 +5,10 @@ #include "priv.h" #include "sf.h" #include "mlx5_ifc_vhca_event.h" +#include "ecpf.h" #include "vhca_event.h" #include "mlx5_core.h" +#include "eswitch.h" struct mlx5_sf_hw { u32 usr_sfnum; @@ -14,60 +16,114 @@ struct mlx5_sf_hw { 
u8 pending_delete: 1; }; +struct mlx5_sf_hwc_table { + struct mlx5_sf_hw *sfs; + int max_fn; + u16 start_fn_id; +}; + +enum mlx5_sf_hwc_index { + MLX5_SF_HWC_LOCAL, + MLX5_SF_HWC_EXTERNAL, + MLX5_SF_HWC_MAX, +}; + struct mlx5_sf_hw_table { struct mlx5_core_dev *dev; - struct mlx5_sf_hw *sfs; - int max_local_functions; struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */ struct notifier_block vhca_nb; + struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX]; }; -u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id) +static struct mlx5_sf_hwc_table * +mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller) { - return sw_id + mlx5_sf_start_function_id(dev); + int idx = !!controller; + + return &dev->priv.sf_hw_table->hwc[idx]; } -static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id) +u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id) { - return hw_id - mlx5_sf_start_function_id(dev); + struct mlx5_sf_hwc_table *hwc; + + hwc = mlx5_sf_controller_to_hwc(dev, controller); + return hwc->start_fn_id + sw_id; } -int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) +static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id) +{ + return hw_id - hwc->start_fn_id; +} + +static struct mlx5_sf_hwc_table * +mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id) { - struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; - int sw_id = -ENOSPC; - u16 hw_fn_id; - int err; int i; - if (!table->max_local_functions) - return -EOPNOTSUPP; + for (i = 0; i < ARRAY_SIZE(table->hwc); i++) { + if (table->hwc[i].max_fn && + fn_id >= table->hwc[i].start_fn_id && + fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn)) + return &table->hwc[i]; + } + return NULL; +} + +static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller, + u32 usr_sfnum) +{ + struct mlx5_sf_hwc_table *hwc; + int i; + + hwc = mlx5_sf_controller_to_hwc(table->dev, controller); + if (!hwc->sfs) + return -ENOSPC; - mutex_lock(&table->table_lock); /* Check if sf with same sfnum already exists or not. 
*/ - for (i = 0; i < table->max_local_functions; i++) { - if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) { - err = -EEXIST; - goto exist_err; - } + for (i = 0; i < hwc->max_fn; i++) { + if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum) + return -EEXIST; } - /* Find the free entry and allocate the entry from the array */ - for (i = 0; i < table->max_local_functions; i++) { - if (!table->sfs[i].allocated) { - table->sfs[i].usr_sfnum = usr_sfnum; - table->sfs[i].allocated = true; - sw_id = i; - break; + for (i = 0; i < hwc->max_fn; i++) { + if (!hwc->sfs[i].allocated) { + hwc->sfs[i].usr_sfnum = usr_sfnum; + hwc->sfs[i].allocated = true; + return i; } } - if (sw_id == -ENOSPC) { - err = -ENOSPC; + return -ENOSPC; +} + +static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id) +{ + struct mlx5_sf_hwc_table *hwc; + + hwc = mlx5_sf_controller_to_hwc(table->dev, controller); + hwc->sfs[id].allocated = false; + hwc->sfs[id].pending_delete = false; +} + +int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + u16 hw_fn_id; + int sw_id; + int err; + + if (!table) + return -EOPNOTSUPP; + + mutex_lock(&table->table_lock); + sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum); + if (sw_id < 0) { + err = sw_id; goto exist_err; } - hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); - err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id); + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id); + err = mlx5_cmd_alloc_sf(dev, hw_fn_id); if (err) goto err; @@ -75,101 +131,161 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) if (err) goto vhca_err; + if (controller) { + /* If this SF is for external controller, SF manager + * needs to arm firmware to receive the events. 
+ */ + err = mlx5_vhca_event_arm(dev, hw_fn_id); + if (err) + goto vhca_err; + } + mutex_unlock(&table->table_lock); return sw_id; vhca_err: - mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + mlx5_cmd_dealloc_sf(dev, hw_fn_id); err: - table->sfs[i].allocated = false; + mlx5_sf_hw_table_id_free(table, controller, sw_id); exist_err: mutex_unlock(&table->table_lock); return err; } -static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id) +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id) { struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; u16 hw_fn_id; - hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id); - mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); - table->sfs[id].allocated = false; - table->sfs[id].pending_delete = false; + mutex_lock(&table->table_lock); + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id); + mlx5_cmd_dealloc_sf(dev, hw_fn_id); + mlx5_sf_hw_table_id_free(table, controller, id); + mutex_unlock(&table->table_lock); } -void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id) +static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev, + struct mlx5_sf_hwc_table *hwc, int idx) { - struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; - - mutex_lock(&table->table_lock); - _mlx5_sf_hw_id_free(dev, id); - mutex_unlock(&table->table_lock); + mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx); + hwc->sfs[idx].allocated = false; + hwc->sfs[idx].pending_delete = false; } -void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) +void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id) { struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; + struct mlx5_sf_hwc_table *hwc; u16 hw_fn_id; u8 state; int err; - hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id); + hwc = mlx5_sf_controller_to_hwc(dev, controller); mutex_lock(&table->table_lock); err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); if (err) goto err; state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); if (state == MLX5_VHCA_STATE_ALLOCATED) { - mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); - table->sfs[id].allocated = false; + mlx5_cmd_dealloc_sf(dev, hw_fn_id); + hwc->sfs[id].allocated = false; } else { - table->sfs[id].pending_delete = true; + hwc->sfs[id].pending_delete = true; } err: mutex_unlock(&table->table_lock); } -static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table) +static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev, + struct mlx5_sf_hwc_table *hwc) { int i; - for (i = 0; i < table->max_local_functions; i++) { - if (table->sfs[i].allocated) - _mlx5_sf_hw_id_free(table->dev, i); + for (i = 0; i < hwc->max_fn; i++) { + if (hwc->sfs[i].allocated) + mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i); } } +static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table) +{ + mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]); + mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]); +} + +static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id) +{ + struct mlx5_sf_hw *sfs; + + if (!max_fn) + return 0; + + sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL); + if (!sfs) + return -ENOMEM; + + hwc->sfs = sfs; + hwc->max_fn = max_fn; + hwc->start_fn_id = base_id; + return 0; +} + +static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc) +{ + kfree(hwc->sfs); 
+} + int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) { struct mlx5_sf_hw_table *table; - struct mlx5_sf_hw *sfs; - int max_functions; + u16 max_ext_fn = 0; + u16 ext_base_id; + u16 max_fn = 0; + u16 base_id; + int err; - if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev)) + if (!mlx5_vhca_event_supported(dev)) + return 0; + + if (mlx5_sf_supported(dev)) + max_fn = mlx5_sf_max_functions(dev); + + err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id); + if (err) + return err; + + if (!max_fn && !max_ext_fn) return 0; - max_functions = mlx5_sf_max_functions(dev); table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; - sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL); - if (!sfs) - goto table_err; - mutex_init(&table->table_lock); table->dev = dev; - table->sfs = sfs; - table->max_local_functions = max_functions; dev->priv.sf_hw_table = table; - mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); + + base_id = mlx5_sf_start_function_id(dev); + err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id); + if (err) + goto table_err; + + err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL], + max_ext_fn, ext_base_id); + if (err) + goto ext_err; + + mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn); return 0; +ext_err: + mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]); table_err: + mutex_destroy(&table->table_lock); kfree(table); - return -ENOMEM; + return err; } void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) @@ -180,7 +296,8 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) return; mutex_destroy(&table->table_lock); - kfree(table->sfs); + mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]); + mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]); kfree(table); } @@ -188,21 +305,26 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode { struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb); const struct mlx5_vhca_state_event *event = data; + struct mlx5_sf_hwc_table *hwc; struct mlx5_sf_hw *sf_hw; u16 sw_id; if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED) return 0; - sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id); - sf_hw = &table->sfs[sw_id]; + hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id); + if (!hwc) + return 0; + + sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id); + sf_hw = &hwc->sfs[sw_id]; mutex_lock(&table->table_lock); /* SF driver notified through firmware that SF is finally detached. * Hence recycle the sf hardware id for reuse. */ if (sf_hw->allocated && sf_hw->pending_delete) - _mlx5_sf_hw_id_free(table->dev, sw_id); + mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id); mutex_unlock(&table->table_lock); return 0; } @@ -215,7 +337,7 @@ int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) return 0; table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event; - return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); + return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb); } void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) @@ -225,7 +347,12 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) if (!table) return; - mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); + mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb); /* Dealloc SFs whose firmware event has been missed. 
*/ - mlx5_sf_hw_dealloc_all(table); + mlx5_sf_hw_table_dealloc_all(table); +} + +bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev) +{ + return !!dev->priv.sf_hw_table; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h index cb02a51d0986..7114f3fc335f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h @@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id); int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id); -u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id); +u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id); -int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum); -void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id); -void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id); +int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum); +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id); +void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id); +bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 3094d20297a9..2338989d4403 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -71,8 +71,7 @@ static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf) static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; - int err; - int vf; + int err, vf, num_msix_count; if (!MLX5_ESWITCH_MANAGER(dev)) goto enable_vfs_hca; @@ -85,12 +84,22 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) } enable_vfs_hca: + num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs); for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err); continue; } + + err = mlx5_set_msix_vec_count(dev, vf + 1, num_msix_count); + if (err) { + mlx5_core_warn(dev, + "failed to set MSI-X vector counts VF %d, err %d\n", + vf, err); + continue; + } + sriov->vfs_ctx[vf].enabled = 1; if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) { err = sriov_restore_guids(dev, vf); @@ -178,6 +187,41 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) return err ? err : num_vfs; } +int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count) +{ + struct pci_dev *pf = pci_physfn(vf); + struct mlx5_core_sriov *sriov; + struct mlx5_core_dev *dev; + int num_vf_msix, id; + + dev = pci_get_drvdata(pf); + num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); + if (!num_vf_msix) + return -EOPNOTSUPP; + + if (!msix_vec_count) + msix_vec_count = + mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf)); + + sriov = &dev->priv.sriov; + + /* Reversed translation of PCI VF function number to the internal + * function_id, which exists in the name of virtfn symlink. 
+ */ + for (id = 0; id < pci_num_vf(pf); id++) { + if (!sriov->vfs_ctx[id].enabled) + continue; + + if (vf->devfn == pci_iov_virtfn_devfn(pf, id)) + break; + } + + if (id == pci_num_vf(pf) || !sriov->vfs_ctx[id].enabled) + return -EINVAL; + + return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count); +} + int mlx5_sriov_attach(struct mlx5_core_dev *dev) { if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 28a7971cac6a..949879cf2092 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -313,8 +313,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, * table, since there is an *assumption* that in such case FW * will recalculate the CS. */ - if (dest_action->dest_tbl.is_fw_tbl) { - *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr; + if (dest_action->dest_tbl->is_fw_tbl) { + *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr; } else { mlx5dr_dbg(dmn, "Destination FT should be terminating when modify TTL is used\n"); @@ -326,8 +326,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, /* If destination is vport we will get the FW flow table * that recalculates the CS and forwards to the vport. */ - ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn, - dest_action->vport.caps->num, + ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn, + dest_action->vport->caps->num, final_icm_addr); if (ret) { mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); @@ -369,6 +369,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type); for (i = 0; i < num_actions; i++) { + struct mlx5dr_action_dest_tbl *dest_tbl; struct mlx5dr_action *action; int max_actions_type = 1; u32 action_type; @@ -382,37 +383,38 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, break; case DR_ACTION_TYP_FT: dest_action = action; - if (!action->dest_tbl.is_fw_tbl) { - if (action->dest_tbl.tbl->dmn != dmn) { + dest_tbl = action->dest_tbl; + if (!dest_tbl->is_fw_tbl) { + if (dest_tbl->tbl->dmn != dmn) { mlx5dr_err(dmn, "Destination table belongs to a different domain\n"); goto out_invalid_arg; } - if (action->dest_tbl.tbl->level <= matcher->tbl->level) { + if (dest_tbl->tbl->level <= matcher->tbl->level) { mlx5_core_warn_once(dmn->mdev, "Connecting table to a lower/same level destination table\n"); mlx5dr_dbg(dmn, "Connecting table at level %d to a destination table at level %d\n", matcher->tbl->level, - action->dest_tbl.tbl->level); + dest_tbl->tbl->level); } attr.final_icm_addr = rx_rule ? 
- action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : - action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr; + dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : + dest_tbl->tbl->tx.s_anchor->chunk->icm_addr; } else { struct mlx5dr_cmd_query_flow_table_details output; int ret; /* get the relevant addresses */ - if (!action->dest_tbl.fw_tbl.rx_icm_addr) { + if (!action->dest_tbl->fw_tbl.rx_icm_addr) { ret = mlx5dr_cmd_query_flow_table(dmn->mdev, - action->dest_tbl.fw_tbl.type, - action->dest_tbl.fw_tbl.id, + dest_tbl->fw_tbl.type, + dest_tbl->fw_tbl.id, &output); if (!ret) { - action->dest_tbl.fw_tbl.tx_icm_addr = + dest_tbl->fw_tbl.tx_icm_addr = output.sw_owner_icm_root_1; - action->dest_tbl.fw_tbl.rx_icm_addr = + dest_tbl->fw_tbl.rx_icm_addr = output.sw_owner_icm_root_0; } else { mlx5dr_err(dmn, @@ -422,50 +424,50 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } } attr.final_icm_addr = rx_rule ? - action->dest_tbl.fw_tbl.rx_icm_addr : - action->dest_tbl.fw_tbl.tx_icm_addr; + dest_tbl->fw_tbl.rx_icm_addr : + dest_tbl->fw_tbl.tx_icm_addr; } break; case DR_ACTION_TYP_QP: mlx5dr_info(dmn, "Domain doesn't support QP\n"); goto out_invalid_arg; case DR_ACTION_TYP_CTR: - attr.ctr_id = action->ctr.ctr_id + - action->ctr.offeset; + attr.ctr_id = action->ctr->ctr_id + + action->ctr->offeset; break; case DR_ACTION_TYP_TAG: - attr.flow_tag = action->flow_tag; + attr.flow_tag = action->flow_tag->flow_tag; break; case DR_ACTION_TYP_TNL_L2_TO_L2: break; case DR_ACTION_TYP_TNL_L3_TO_L2: - attr.decap_index = action->rewrite.index; - attr.decap_actions = action->rewrite.num_of_actions; + attr.decap_index = action->rewrite->index; + attr.decap_actions = action->rewrite->num_of_actions; attr.decap_with_vlan = attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS; break; case DR_ACTION_TYP_MODIFY_HDR: - attr.modify_index = action->rewrite.index; - attr.modify_actions = action->rewrite.num_of_actions; - recalc_cs_required = action->rewrite.modify_ttl && + attr.modify_index = action->rewrite->index; + attr.modify_actions = action->rewrite->num_of_actions; + recalc_cs_required = action->rewrite->modify_ttl && !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps); break; case DR_ACTION_TYP_L2_TO_TNL_L2: case DR_ACTION_TYP_L2_TO_TNL_L3: - attr.reformat_size = action->reformat.reformat_size; - attr.reformat_id = action->reformat.reformat_id; + attr.reformat_size = action->reformat->reformat_size; + attr.reformat_id = action->reformat->reformat_id; break; case DR_ACTION_TYP_VPORT: - attr.hit_gvmi = action->vport.caps->vhca_gvmi; + attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { /* Loopback on WIRE vport is not supported */ - if (action->vport.caps->num == WIRE_PORT) + if (action->vport->caps->num == WIRE_PORT) goto out_invalid_arg; - attr.final_icm_addr = action->vport.caps->icm_address_rx; + attr.final_icm_addr = action->vport->caps->icm_address_rx; } else { - attr.final_icm_addr = action->vport.caps->icm_address_tx; + attr.final_icm_addr = action->vport->caps->icm_address_tx; } break; case DR_ACTION_TYP_POP_VLAN: @@ -477,7 +479,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (attr.vlans.count == MLX5DR_MAX_VLANS) return -EINVAL; - attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr; + attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr; break; default: goto out_invalid_arg; @@ -530,17 +532,37 @@ out_invalid_arg: return -EINVAL; } +static unsigned int action_size[DR_ACTION_TYP_MAX] = { + [DR_ACTION_TYP_TNL_L2_TO_L2] = 
sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite), + [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_FT] = sizeof(struct mlx5dr_action_dest_tbl), + [DR_ACTION_TYP_CTR] = sizeof(struct mlx5dr_action_ctr), + [DR_ACTION_TYP_TAG] = sizeof(struct mlx5dr_action_flow_tag), + [DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite), + [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport), + [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan), +}; + static struct mlx5dr_action * dr_action_create_generic(enum mlx5dr_action_type action_type) { struct mlx5dr_action *action; + int extra_size; + + if (action_type < DR_ACTION_TYP_MAX) + extra_size = action_size[action_type]; + else + return NULL; - action = kzalloc(sizeof(*action), GFP_KERNEL); + action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL); if (!action) return NULL; action->action_type = action_type; refcount_set(&action->refcount, 1); + action->data = action + 1; return action; } @@ -559,10 +581,10 @@ mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num) if (!action) return NULL; - action->dest_tbl.is_fw_tbl = true; - action->dest_tbl.fw_tbl.dmn = dmn; - action->dest_tbl.fw_tbl.id = table_num; - action->dest_tbl.fw_tbl.type = FS_FT_FDB; + action->dest_tbl->is_fw_tbl = true; + action->dest_tbl->fw_tbl.dmn = dmn; + action->dest_tbl->fw_tbl.id = table_num; + action->dest_tbl->fw_tbl.type = FS_FT_FDB; refcount_inc(&dmn->refcount); return action; @@ -579,7 +601,7 @@ mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl) if (!action) goto dec_ref; - action->dest_tbl.tbl = tbl; + action->dest_tbl->tbl = tbl; return action; @@ -624,12 +646,12 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, case DR_ACTION_TYP_VPORT: hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - hw_dests[i].vport.num = dest_action->vport.caps->num; - hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi; + hw_dests[i].vport.num = dest_action->vport->caps->num; + hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi; if (reformat_action) { reformat_req = true; hw_dests[i].vport.reformat_id = - reformat_action->reformat.reformat_id; + reformat_action->reformat->reformat_id; ref_actions[num_of_ref++] = reformat_action; hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } @@ -637,10 +659,10 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, case DR_ACTION_TYP_FT: hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - if (dest_action->dest_tbl.is_fw_tbl) - hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id; + if (dest_action->dest_tbl->is_fw_tbl) + hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id; else - hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id; + hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id; break; default: @@ -657,8 +679,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, hw_dests, num_of_dests, reformat_req, - &action->dest_tbl.fw_tbl.id, - &action->dest_tbl.fw_tbl.group_id); + &action->dest_tbl->fw_tbl.id, + &action->dest_tbl->fw_tbl.group_id); if (ret) goto free_action; @@ -667,11 +689,11 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, for (i = 0; i < num_of_ref; i++) refcount_inc(&ref_actions[i]->refcount); - action->dest_tbl.is_fw_tbl = true; - 
action->dest_tbl.fw_tbl.dmn = dmn; - action->dest_tbl.fw_tbl.type = FS_FT_FDB; - action->dest_tbl.fw_tbl.ref_actions = ref_actions; - action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref; + action->dest_tbl->is_fw_tbl = true; + action->dest_tbl->fw_tbl.dmn = dmn; + action->dest_tbl->fw_tbl.type = FS_FT_FDB; + action->dest_tbl->fw_tbl.ref_actions = ref_actions; + action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref; kfree(hw_dests); @@ -696,10 +718,10 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn, if (!action) return NULL; - action->dest_tbl.is_fw_tbl = 1; - action->dest_tbl.fw_tbl.type = ft->type; - action->dest_tbl.fw_tbl.id = ft->id; - action->dest_tbl.fw_tbl.dmn = dmn; + action->dest_tbl->is_fw_tbl = 1; + action->dest_tbl->fw_tbl.type = ft->type; + action->dest_tbl->fw_tbl.id = ft->id; + action->dest_tbl->fw_tbl.dmn = dmn; refcount_inc(&dmn->refcount); @@ -715,7 +737,7 @@ mlx5dr_action_create_flow_counter(u32 counter_id) if (!action) return NULL; - action->ctr.ctr_id = counter_id; + action->ctr->ctr_id = counter_id; return action; } @@ -728,7 +750,7 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value) if (!action) return NULL; - action->flow_tag = tag_value & 0xffffff; + action->flow_tag->flow_tag = tag_value & 0xffffff; return action; } @@ -794,8 +816,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, if (ret) return ret; - action->reformat.reformat_id = reformat_id; - action->reformat.reformat_size = data_sz; + action->reformat->reformat_id = reformat_id; + action->reformat->reformat_size = data_sz; return 0; } case DR_ACTION_TYP_TNL_L2_TO_L2: @@ -811,28 +833,28 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, data, data_sz, hw_actions, ACTION_CACHE_LINE_SIZE, - &action->rewrite.num_of_actions); + &action->rewrite->num_of_actions); if (ret) { mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n"); return ret; } - action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, - DR_CHUNK_SIZE_8); - if (!action->rewrite.chunk) { + action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, + DR_CHUNK_SIZE_8); + if (!action->rewrite->chunk) { mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n"); return -ENOMEM; } - action->rewrite.data = (void *)hw_actions; - action->rewrite.index = (action->rewrite.chunk->icm_addr - + action->rewrite->data = (void *)hw_actions; + action->rewrite->index = (action->rewrite->chunk->icm_addr - dmn->info.caps.hdr_modify_icm_addr) / ACTION_CACHE_LINE_SIZE; ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n"); - mlx5dr_icm_free_chunk(action->rewrite.chunk); + mlx5dr_icm_free_chunk(action->rewrite->chunk); return ret; } return 0; @@ -867,7 +889,7 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn, if (!action) return NULL; - action->push_vlan.vlan_hdr = vlan_hdr_h; + action->push_vlan->vlan_hdr = vlan_hdr_h; return action; } @@ -898,7 +920,7 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, if (!action) goto dec_ref; - action->reformat.dmn = dmn; + action->reformat->dmn = dmn; ret = dr_action_create_reformat_action(dmn, data_sz, @@ -1104,17 +1126,17 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { u16 sw_field = MLX5_GET(set_action_in, sw_action, field); - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; if (sw_field == 
MLX5_ACTION_IN_FIELD_METADATA_REG_A) { - action->rewrite.allow_rx = 0; + action->rewrite->allow_rx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", sw_field); return -EINVAL; } } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { - action->rewrite.allow_tx = 0; + action->rewrite->allow_tx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", sw_field); @@ -1122,7 +1144,7 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action, } } - if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { + if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) { mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n"); return -EINVAL; } @@ -1135,7 +1157,7 @@ dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { u16 sw_field = MLX5_GET(set_action_in, sw_action, field); - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL && sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT && @@ -1153,7 +1175,7 @@ static int dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; u16 sw_fields[2]; int i; @@ -1162,14 +1184,14 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, for (i = 0; i < 2; i++) { if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { - action->rewrite.allow_rx = 0; + action->rewrite->allow_rx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", sw_fields[i]); return -EINVAL; } } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { - action->rewrite.allow_tx = 0; + action->rewrite->allow_tx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", sw_fields[i]); @@ -1178,7 +1200,7 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, } } - if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { + if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) { mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n"); return -EINVAL; } @@ -1190,7 +1212,7 @@ static int dr_action_modify_check_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; u8 action_type; int ret; @@ -1239,7 +1261,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, { const struct mlx5dr_ste_action_modify_field *hw_dst_action_info; const struct mlx5dr_ste_action_modify_field *hw_src_action_info; - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; int ret, i, hw_idx = 0; __be64 *sw_action; __be64 hw_action; @@ -1249,8 +1271,8 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, *modify_ttl = false; - action->rewrite.allow_rx = 1; - action->rewrite.allow_tx = 1; + action->rewrite->allow_rx = 1; + action->rewrite->allow_tx = 1; for (i = 0; i < num_sw_actions; i++) { sw_action = &sw_actions[i]; @@ -1358,13 +1380,13 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, if (ret) goto free_hw_actions; - action->rewrite.chunk = 
chunk; - action->rewrite.modify_ttl = modify_ttl; - action->rewrite.data = (u8 *)hw_actions; - action->rewrite.num_of_actions = num_hw_actions; - action->rewrite.index = (chunk->icm_addr - - dmn->info.caps.hdr_modify_icm_addr) / - ACTION_CACHE_LINE_SIZE; + action->rewrite->chunk = chunk; + action->rewrite->modify_ttl = modify_ttl; + action->rewrite->data = (u8 *)hw_actions; + action->rewrite->num_of_actions = num_hw_actions; + action->rewrite->index = (chunk->icm_addr - + dmn->info.caps.hdr_modify_icm_addr) / + ACTION_CACHE_LINE_SIZE; ret = mlx5dr_send_postsend_action(dmn, action); if (ret) @@ -1399,7 +1421,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn, if (!action) goto dec_ref; - action->rewrite.dmn = dmn; + action->rewrite->dmn = dmn; ret = dr_action_create_modify_action(dmn, actions_sz, @@ -1451,8 +1473,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, if (!action) return NULL; - action->vport.dmn = vport_dmn; - action->vport.caps = vport_cap; + action->vport->dmn = vport_dmn; + action->vport->caps = vport_cap; return action; } @@ -1464,44 +1486,44 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) switch (action->action_type) { case DR_ACTION_TYP_FT: - if (action->dest_tbl.is_fw_tbl) - refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount); + if (action->dest_tbl->is_fw_tbl) + refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount); else - refcount_dec(&action->dest_tbl.tbl->refcount); + refcount_dec(&action->dest_tbl->tbl->refcount); - if (action->dest_tbl.is_fw_tbl && - action->dest_tbl.fw_tbl.num_of_ref_actions) { + if (action->dest_tbl->is_fw_tbl && + action->dest_tbl->fw_tbl.num_of_ref_actions) { struct mlx5dr_action **ref_actions; int i; - ref_actions = action->dest_tbl.fw_tbl.ref_actions; - for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++) + ref_actions = action->dest_tbl->fw_tbl.ref_actions; + for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++) refcount_dec(&ref_actions[i]->refcount); kfree(ref_actions); - mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn, - action->dest_tbl.fw_tbl.id, - action->dest_tbl.fw_tbl.group_id); + mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn, + action->dest_tbl->fw_tbl.id, + action->dest_tbl->fw_tbl.group_id); } break; case DR_ACTION_TYP_TNL_L2_TO_L2: - refcount_dec(&action->reformat.dmn->refcount); + refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_TNL_L3_TO_L2: - mlx5dr_icm_free_chunk(action->rewrite.chunk); - refcount_dec(&action->reformat.dmn->refcount); + mlx5dr_icm_free_chunk(action->rewrite->chunk); + refcount_dec(&action->rewrite->dmn->refcount); break; case DR_ACTION_TYP_L2_TO_TNL_L2: case DR_ACTION_TYP_L2_TO_TNL_L3: - mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev, - action->reformat.reformat_id); - refcount_dec(&action->reformat.dmn->refcount); + mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev, + action->reformat->reformat_id); + refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_MODIFY_HDR: - mlx5dr_icm_free_chunk(action->rewrite.chunk); - kfree(action->rewrite.data); - refcount_dec(&action->rewrite.dmn->refcount); + mlx5dr_icm_free_chunk(action->rewrite->chunk); + kfree(action->rewrite->data); + refcount_dec(&action->rewrite->dmn->refcount); break; default: break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 30b0136b5bc7..5970cb8fc0c0 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -85,15 +85,53 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev, return 0; } +static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, + u16 vport, bool *roce_en) +{ + u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {}; + int err; + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); + MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + *roce_en = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.roce_en); + return 0; +} + int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, struct mlx5dr_cmd_caps *caps) { + bool roce_en; + int err; + caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required); caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager); caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); + if (MLX5_CAP_GEN(mdev, roce)) { + err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en); + if (err) + return err; + + caps->roce_caps.roce_en = roce_en; + caps->roce_caps.fl_rc_qp_when_roce_disabled = + MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled); + caps->roce_caps.fl_rc_qp_when_roce_enabled = + MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled); + } + + caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new); + if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) { caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); @@ -106,6 +144,34 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1); } + if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED) + caps->flex_parser_id_geneve_tlv_option_0 = + MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED) + caps->flex_parser_id_mpls_over_gre = + MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre); + + if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED) + caps->flex_parser_id_mpls_over_udp = + MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED) + caps->flex_parser_id_gtpu_dw_0 = + MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED) + caps->flex_parser_id_gtpu_teid = + MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED) + caps->flex_parser_id_gtpu_dw_2 = + MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED) + caps->flex_parser_id_gtpu_first_ext_dw_0 = + MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0); + caps->nic_rx_drop_address = MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address); caps->nic_tx_drop_address = @@ -287,7 +353,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, u32 *in; int err; - in = kzalloc(inlen, GFP_KERNEL); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -302,7 
+368,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, *group_id = MLX5_GET(create_flow_group_out, out, group_id); out: - kfree(in); + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 15673cd10039..6f6191d1d5a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -92,15 +92,17 @@ static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc) misc->gre_k_present || misc->gre_s_present); } -#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \ - (_misc2).outer_first_mpls_over_##gre_udp##_label || \ - (_misc2).outer_first_mpls_over_##gre_udp##_exp || \ - (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \ - (_misc2).outer_first_mpls_over_##gre_udp##_ttl) - -#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \ - DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \ - DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp)) +#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\ + (_misc)->outer_first_mpls_over_gre_label || \ + (_misc)->outer_first_mpls_over_gre_exp || \ + (_misc)->outer_first_mpls_over_gre_s_bos || \ + (_misc)->outer_first_mpls_over_gre_ttl) + +#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\ + (_misc)->outer_first_mpls_over_udp_label || \ + (_misc)->outer_first_mpls_over_udp_exp || \ + (_misc)->outer_first_mpls_over_udp_s_bos || \ + (_misc)->outer_first_mpls_over_udp_ttl) static bool dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) @@ -133,6 +135,11 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc) misc->geneve_opt_len; } +static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3) +{ + return misc3->geneve_tlv_option_0_data; +} + static bool dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { @@ -148,6 +155,109 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask, dr_matcher_supp_tnl_geneve(&dmn->info.caps); } +static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3) +{ + return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid; +} + +static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED; +} + +static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return dr_mask_is_tnl_gtpu_set(&mask->misc3) && + dr_matcher_supp_tnl_gtpu(&dmn->info.caps); +} + +static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED; +} + +static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return mask->misc3.gtpu_dw_0 && + dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps); +} + +static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED; +} + +static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return mask->misc3.gtpu_teid && + dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps); +} + +static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED; +} + +static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return mask->misc3.gtpu_dw_2 && + 
dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps); +} + +static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED; +} + +static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return mask->misc3.gtpu_first_ext_dw_0 && + dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps); +} + +static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + + return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) && + dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) || + (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) && + dr_mask_is_tnl_gtpu_teid(mask, dmn)) || + (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) && + dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) || + (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) && + dr_mask_is_tnl_gtpu_first_ext(mask, dmn)); +} + +static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + + return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) && + dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) || + (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) && + dr_mask_is_tnl_gtpu_teid(mask, dmn)) || + (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) && + dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) || + (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) && + dr_mask_is_tnl_gtpu_first_ext(mask, dmn)); +} + +static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) || + dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) || + dr_mask_is_tnl_gtpu(mask, dmn); +} + static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) { return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || @@ -199,6 +309,65 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc) return (misc->source_sqn || misc->source_port); } +static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id, + u32 flex_parser_value) +{ + if (flex_parser_id) + return flex_parser_id <= DR_STE_MAX_FLEX_0_ID; + + /* Using flex_parser 0 means that id is zero, thus value must be set. 
*/ + return flex_parser_value; +} + +static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4) +{ + return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0, + misc4->prog_sample_field_value_0) || + dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1, + misc4->prog_sample_field_value_1) || + dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2, + misc4->prog_sample_field_value_2) || + dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3, + misc4->prog_sample_field_value_3)); +} + +static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id) +{ + return flex_parser_id > DR_STE_MAX_FLEX_0_ID && + flex_parser_id <= DR_STE_MAX_FLEX_1_ID; +} + +static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4) +{ + return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) || + dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) || + dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) || + dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3)); +} + +static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED; +} + +static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) && + dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps); +} + +static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED; +} + +static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) && + dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps); +} int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, enum mlx5dr_ipv outer_ipv, @@ -251,6 +420,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3) mask.misc3 = matcher->mask.misc3; + if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) + mask.misc4 = matcher->mask.misc4; + ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria, &matcher->mask, NULL); if (ret) @@ -321,9 +493,28 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn)) mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++], &mask, inner, rx); - else if (dr_mask_is_tnl_geneve(&mask, dmn)) + else if (dr_mask_is_tnl_geneve(&mask, dmn)) { mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++], &mask, inner, rx); + if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3)) + mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + } else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) { + if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn)) + mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + + if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn)) + mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + + if (dr_mask_is_tnl_gtpu(&mask, dmn)) + mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++], + &mask, inner, rx); + } if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++], @@ -333,17 +524,20 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher 
*matcher, mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++], &mask, inner, rx); - if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) - mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++], - &mask, inner, rx); + if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn)) + mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn)) + mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + + if (dr_mask_is_icmp(&mask, dmn)) + mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); - if (dr_mask_is_icmp(&mask, dmn)) { - ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++], - &mask, &dmn->info.caps, - inner, rx); - if (ret) - return ret; - } if (dr_mask_is_tnl_gre_set(&mask.misc)) mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++], &mask, inner, rx); @@ -404,10 +598,26 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++], &mask, inner, rx); - if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) - mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++], - &mask, inner, rx); + if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn)) + mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn)) + mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); } + + if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) { + if (dr_mask_is_flex_parser_0_3_set(&mask.misc4)) + mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++], + &mask, false, rx); + + if (dr_mask_is_flex_parser_4_7_set(&mask.misc4)) + mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++], + &mask, false, rx); + } + /* Empty matcher, takes all */ if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index b337d6626bff..43356fad53de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -952,6 +952,17 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, return false; } } + + if (match_criteria & DR_MATCHER_CRITERIA_MISC4) { + s_idx = offsetof(struct mlx5dr_match_param, misc4); + e_idx = min(s_idx + sizeof(param->misc4), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_err(matcher->tbl->dmn, + "Rule misc4 parameters contains a value not specified by mask\n"); + return false; + } + } return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 8a6a56f9dc4e..12cf323a5943 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -32,6 +32,7 @@ struct dr_qp_rtr_attr { u8 min_rnr_timer; u8 sgid_index; u16 udp_src_port; + u8 fl:1; }; struct dr_qp_rts_attr { @@ -45,6 +46,7 @@ struct dr_qp_init_attr { u32 pdn; u32 max_send_wr; struct mlx5_uars_page *uar; + u8 isolate_vl_tc:1; }; static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64) @@ -157,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); + MLX5_SET(qpc, qpc, 
isolate_vl_tc, attr->isolate_vl_tc); MLX5_SET(qpc, qpc, pd, attr->pdn); MLX5_SET(qpc, qpc, uar_page, attr->uar->index); MLX5_SET(qpc, qpc, log_page_size, @@ -213,7 +216,7 @@ static void dr_destroy_qp(struct mlx5_core_dev *mdev, static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl) { dma_wmb(); - *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff); + *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff); /* After wmb() the hw aware of new work */ wmb(); @@ -223,7 +226,7 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl) static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, u32 rkey, struct dr_data_seg *data_seg, - u32 opcode, int nreq) + u32 opcode, bool notify_hw) { struct mlx5_wqe_raddr_seg *wq_raddr; struct mlx5_wqe_ctrl_seg *wq_ctrl; @@ -255,16 +258,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++; - if (nreq) + if (notify_hw) dr_cmd_notify_hw(dr_qp, wq_ctrl); } static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info) { dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, - &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0); + &send_info->write, MLX5_OPCODE_RDMA_WRITE, false); dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, - &send_info->read, MLX5_OPCODE_RDMA_READ, 1); + &send_info->read, MLX5_OPCODE_RDMA_READ, true); } /** @@ -406,7 +409,7 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn, alloc_size = *num_stes * DR_STE_SIZE; } - *data = kzalloc(alloc_size, GFP_KERNEL); + *data = kvzalloc(alloc_size, GFP_KERNEL); if (!*data) return -ENOMEM; @@ -505,7 +508,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn, } out_free: - kfree(data); + kvfree(data); return ret; } @@ -562,7 +565,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, } out_free: - kfree(data); + kvfree(data); return ret; } @@ -572,12 +575,12 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, struct postsend_info send_info = {}; int ret; - send_info.write.addr = (uintptr_t)action->rewrite.data; - send_info.write.length = action->rewrite.num_of_actions * + send_info.write.addr = (uintptr_t)action->rewrite->data; + send_info.write.length = action->rewrite->num_of_actions * DR_MODIFY_ACTION_SIZE; send_info.write.lkey = 0; - send_info.remote_addr = action->rewrite.chunk->mr_addr; - send_info.rkey = action->rewrite.chunk->rkey; + send_info.remote_addr = action->rewrite->chunk->mr_addr; + send_info.rkey = action->rewrite->chunk->rkey; ret = dr_postsend_icm_data(dmn, &send_info); @@ -650,6 +653,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, attr->udp_src_port); MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num); + MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl); MLX5_SET(qpc, qpc, min_rnr_nak, 1); MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP); @@ -658,6 +662,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, return mlx5_cmd_exec_in(mdev, init2rtr_qp, in); } +static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps) +{ + /* Check whether RC RoCE QP creation with force loopback is allowed. 
+ * There are two separate capability bits for this: + * - force loopback when RoCE is enabled + * - force loopback when RoCE is disabled + */ + return ((caps->roce_caps.roce_en && + caps->roce_caps.fl_rc_qp_when_roce_enabled) || + (!caps->roce_caps.roce_en && + caps->roce_caps.fl_rc_qp_when_roce_disabled)); +} + static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) { struct mlx5dr_qp *dr_qp = dmn->send_ring->qp; @@ -676,17 +693,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) } /* RTR */ - ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr); - if (ret) - return ret; - rtr_attr.mtu = mtu; rtr_attr.qp_num = dr_qp->qpn; rtr_attr.min_rnr_timer = 12; rtr_attr.port_num = port; - rtr_attr.sgid_index = gid_index; rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp; + /* If QP creation with force loopback is allowed, then there + * is no need for GID index when creating the QP. + * Otherwise we query GID attributes and use GID index. + */ + rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps); + if (!rtr_attr.fl) { + ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, + &rtr_attr.dgid_attr); + if (ret) + return ret; + + rtr_attr.sgid_index = gid_index; + } + ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr); if (ret) { mlx5dr_err(dmn, "Failed modify QP init2rtr\n"); @@ -900,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) init_attr.pdn = dmn->pdn; init_attr.uar = dmn->uar; init_attr.max_send_wr = QUEUE_SIZE; + + /* Isolated VL is applicable only if force loopback is supported */ + if (dr_send_allow_fl(&dmn->info.caps)) + init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc; + spin_lock_init(&dmn->send_ring->lock); dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index f49abc7a4b9b..9b1529137cba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -852,6 +852,35 @@ static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec) spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code); spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type); spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code); + spec->geneve_tlv_option_0_data = + MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data); + spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags); + spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type); + spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid); + spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0); + spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2); + spec->gtpu_first_ext_dw_0 = + MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0); +} + +static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec) +{ + spec->prog_sample_field_id_0 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0); + spec->prog_sample_field_value_0 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0); + spec->prog_sample_field_id_1 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1); + spec->prog_sample_field_value_1 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1); + spec->prog_sample_field_id_2 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2); + spec->prog_sample_field_value_2 = + 
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2); + spec->prog_sample_field_id_3 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3); + spec->prog_sample_field_value_3 = + MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3); } void mlx5dr_ste_copy_param(u8 match_criteria, @@ -925,6 +954,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } dr_ste_copy_mask_misc3(buff, &set_param->misc3); } + + param_location += sizeof(struct mlx5dr_match_misc3); + + if (match_criteria & DR_MATCHER_CRITERIA_MISC4) { + if (mask->match_sz < param_location + + sizeof(struct mlx5dr_match_misc4)) { + memcpy(tail_param, data + param_location, + mask->match_sz - param_location); + buff = tail_param; + } else { + buff = data + param_location; + } + dr_ste_copy_mask_misc4(buff, &set_param->misc4); + } } void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, @@ -1051,26 +1094,40 @@ void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_tnl_gre_init(sb, mask); } -void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, - struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) +{ + sb->rx = rx; + sb->inner = inner; + sb->caps = caps; + return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask); +} + +void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) { sb->rx = rx; sb->inner = inner; - ste_ctx->build_tnl_mpls_init(sb, mask); + sb->caps = caps; + return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask); } -int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, - struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - bool inner, bool rx) +void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) { sb->rx = rx; sb->inner = inner; sb->caps = caps; - return ste_ctx->build_icmp_init(sb, mask); + ste_ctx->build_icmp_init(sb, mask); } void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, @@ -1113,6 +1170,52 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_tnl_geneve_init(sb, mask); } +void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) +{ + sb->rx = rx; + sb->caps = caps; + sb->inner = inner; + ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask); +} + +void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) +{ + sb->rx = rx; + sb->inner = inner; + ste_ctx->build_tnl_gtpu_init(sb, mask); +} + +void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) +{ + sb->rx = rx; + sb->caps = caps; + sb->inner = inner; + ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask); +} + +void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + 
struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) +{ + sb->rx = rx; + sb->caps = caps; + sb->inner = inner; + ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask); +} + void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -1148,6 +1251,26 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_src_gvmi_qpn_init(sb, mask); } +void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) +{ + sb->rx = rx; + sb->inner = inner; + ste_ctx->build_flex_parser_0_init(sb, mask); +} + +void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) +{ + sb->rx = rx; + sb->inner = inner; + ste_ctx->build_flex_parser_1_init(sb, mask); +} + static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 06bcb0ee8f96..992b591bf0c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -62,6 +62,13 @@ in_out##_first_mpls_ttl); \ } while (0) +#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \ + u8 parser_id = (caps)->flex_parser_id_##fname; \ + u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \ + *(__be32 *)parser_ptr = cpu_to_be32((spec)->fname);\ + (spec)->fname = 0;\ +} while (0) + #define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\ (_misc)->outer_first_mpls_over_gre_label || \ (_misc)->outer_first_mpls_over_gre_exp || \ @@ -86,8 +93,22 @@ enum dr_ste_action_modify_type_l4 { DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2, }; +enum { + HDR_MPLS_OFFSET_LABEL = 12, + HDR_MPLS_OFFSET_EXP = 9, + HDR_MPLS_OFFSET_S_BOS = 8, + HDR_MPLS_OFFSET_TTL = 0, +}; + u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask); +static inline u8 * +dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id) +{ + /* Calculate tag byte offset based on flex parser id */ + return tag + 4 * (3 - (parser_id % 4)); +} + #define DR_STE_CTX_BUILDER(fname) \ ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \ struct mlx5dr_match_param *mask)) @@ -106,14 +127,22 @@ struct mlx5dr_ste_ctx { void DR_STE_CTX_BUILDER(mpls); void DR_STE_CTX_BUILDER(tnl_gre); void DR_STE_CTX_BUILDER(tnl_mpls); - int DR_STE_CTX_BUILDER(icmp); + void DR_STE_CTX_BUILDER(tnl_mpls_over_gre); + void DR_STE_CTX_BUILDER(tnl_mpls_over_udp); + void DR_STE_CTX_BUILDER(icmp); void DR_STE_CTX_BUILDER(general_purpose); void DR_STE_CTX_BUILDER(eth_l4_misc); void DR_STE_CTX_BUILDER(tnl_vxlan_gpe); void DR_STE_CTX_BUILDER(tnl_geneve); + void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt); void DR_STE_CTX_BUILDER(register_0); void DR_STE_CTX_BUILDER(register_1); void DR_STE_CTX_BUILDER(src_gvmi_qpn); + void DR_STE_CTX_BUILDER(flex_parser_0); + void DR_STE_CTX_BUILDER(flex_parser_1); + void DR_STE_CTX_BUILDER(tnl_gtpu); + void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0); + void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1); /* Getters and Setters */ void (*ste_init)(u8 *hw_ste_p, u16 lu_type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 9ec079247c4b..0757a4e8540e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -331,7 +331,7 @@ static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr, MLX5_SET(ste_sx_transmit, hw_ste_p, action_type, DR_STE_ACTION_TYPE_PUSH_VLAN); MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr); - /* Due to HW limitation we need to set this bit, otherwise reforamt + + /* Due to HW limitation we need to set this bit, otherwise reformat + * push vlan will not work. */ if (go_back) @@ -1248,32 +1248,29 @@ dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value, u8 *tag) { struct mlx5dr_match_misc2 *misc_2 = &value->misc2; + u32 mpls_hdr; if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) { - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, - misc_2, outer_first_mpls_over_gre_label); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, - misc_2, outer_first_mpls_over_gre_exp); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, - misc_2, outer_first_mpls_over_gre_s_bos); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl, - misc_2, outer_first_mpls_over_gre_ttl); + mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL; + misc_2->outer_first_mpls_over_gre_label = 0; + mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP; + misc_2->outer_first_mpls_over_gre_exp = 0; + mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS; + misc_2->outer_first_mpls_over_gre_s_bos = 0; + mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL; + misc_2->outer_first_mpls_over_gre_ttl = 0; } else { - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, - misc_2, outer_first_mpls_over_udp_label); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, - misc_2, outer_first_mpls_over_udp_exp); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, - misc_2, outer_first_mpls_over_udp_s_bos); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl, - misc_2, outer_first_mpls_over_udp_ttl); + mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL; + misc_2->outer_first_mpls_over_udp_label = 0; + mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP; + misc_2->outer_first_mpls_over_udp_exp = 0; + mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS; + misc_2->outer_first_mpls_over_udp_s_bos = 0; + mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL; + misc_2->outer_first_mpls_over_udp_ttl = 0; } + + MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr); return 0; } @@ -1288,6 +1285,91 @@ dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag; } +static int +dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + u8 *parser_ptr; + u8 parser_id; + u32 mpls_hdr; + + mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL; + misc2->outer_first_mpls_over_udp_label = 0; + mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP; + misc2->outer_first_mpls_over_udp_exp = 0; + mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS; + misc2->outer_first_mpls_over_udp_s_bos = 0; + mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL; + misc2->outer_first_mpls_over_udp_ttl = 0; + + parser_id = sb->caps->flex_parser_id_mpls_over_udp; + parser_ptr = 
dr_ste_calc_flex_parser_offset(tag, parser_id); + *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr); + + return 0; +} + +static void +dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask); + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ? + DR_STE_V0_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V0_LU_TYPE_FLEX_PARSER_0; + + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag; +} + +static int +dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + u8 *parser_ptr; + u8 parser_id; + u32 mpls_hdr; + + mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL; + misc2->outer_first_mpls_over_gre_label = 0; + mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP; + misc2->outer_first_mpls_over_gre_exp = 0; + mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS; + misc2->outer_first_mpls_over_gre_s_bos = 0; + mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL; + misc2->outer_first_mpls_over_gre_ttl = 0; + + parser_id = sb->caps->flex_parser_id_mpls_over_gre; + parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); + *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr); + + return 0; +} + +static void +dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask); + + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ? 
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V0_LU_TYPE_FLEX_PARSER_0; + + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag; +} + #define ICMP_TYPE_OFFSET_FIRST_DW 24 #define ICMP_CODE_OFFSET_FIRST_DW 16 @@ -1300,9 +1382,11 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value, u32 *icmp_header_data; int dw0_location; int dw1_location; + u8 *parser_ptr; u8 *icmp_type; u8 *icmp_code; bool is_ipv4; + u32 icmp_hdr; is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3); if (is_ipv4) { @@ -1319,47 +1403,40 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value, dw1_location = sb->caps->flex_parser_id_icmpv6_dw1; } - switch (dw0_location) { - case 4: - MLX5_SET(ste_flex_parser_1, tag, flex_parser_4, - (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) | - (*icmp_code << ICMP_TYPE_OFFSET_FIRST_DW)); - - *icmp_type = 0; - *icmp_code = 0; - break; - default: - return -EINVAL; - } + parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location); + icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) | + (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW); + *(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr); + *icmp_code = 0; + *icmp_type = 0; - switch (dw1_location) { - case 5: - MLX5_SET(ste_flex_parser_1, tag, flex_parser_5, - *icmp_header_data); - *icmp_header_data = 0; - break; - default: - return -EINVAL; - } + parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location); + *(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data); + *icmp_header_data = 0; return 0; } -static int +static void dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { - int ret; + u8 parser_id; + bool is_ipv4; - ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask); - if (ret) - return ret; + dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask); - sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1; + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3); + parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 : + sb->caps->flex_parser_id_icmpv6_dw0; + sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ? 
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V0_LU_TYPE_FLEX_PARSER_0; sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag; - - return 0; } static int @@ -1595,6 +1672,185 @@ dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag; } +static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id, + u32 *misc4_field_value, + bool *parser_is_used, + u8 *tag) +{ + u32 id = *misc4_field_id; + u8 *parser_ptr; + + if (parser_is_used[id]) + return; + + parser_is_used[id] = true; + parser_ptr = dr_ste_calc_flex_parser_offset(tag, id); + + *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value); + *misc4_field_id = 0; + *misc4_field_value = 0; +} + +static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4; + bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {}; + + dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0, + &misc_4_mask->prog_sample_field_value_0, + parser_is_used, tag); + + dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1, + &misc_4_mask->prog_sample_field_value_1, + parser_is_used, tag); + + dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2, + &misc_4_mask->prog_sample_field_value_2, + parser_is_used, tag); + + dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3, + &misc_4_mask->prog_sample_field_value_3, + parser_is_used, tag); + + return 0; +} + +static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0; + dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag; +} + +static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1; + dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag; +} + +static int +dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0; + u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); + + MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3, + misc3->geneve_tlv_option_0_data); + misc3->geneve_tlv_option_0_data = 0; + + return 0; +} + +static void +dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask); + + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ? 
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V0_LU_TYPE_FLEX_PARSER_0; + + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag; +} + +static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + + DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, + gtpu_msg_flags, misc3, + gtpu_msg_flags); + DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, + gtpu_msg_type, misc3, + gtpu_msg_type); + DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, + gtpu_teid, misc3, + gtpu_teid); + + return 0; +} + +static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag; +} + +static int +dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3); + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3); + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3); + return 0; +} + +static void +dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag; +} + +static int +dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3); + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3); + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3); + return 0; +} + +static void +dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag; +} + struct mlx5dr_ste_ctx ste_ctx_v0 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, @@ -1609,14 +1865,22 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = { .build_mpls_init = &dr_ste_v0_build_mpls_init, 
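The reworked v0 ICMP builder packs the ICMP type and code into the first flex parser dword and then derives the STE lookup bank from whichever parser id the firmware assigned, instead of hard-coding parser 4/5. A minimal userspace sketch of just that first-dword packing and bank selection, with illustrative helper names rather than the driver's, assuming the ICMP_*_OFFSET_FIRST_DW and DR_STE_MAX_FLEX_0_ID values shown in the hunks:

/* Illustrative only: mirror dr_ste_v0_build_icmp_tag()/_init() packing. */
#include <stdint.h>
#include <stdio.h>

#define ICMP_TYPE_OFFSET_FIRST_DW	24
#define ICMP_CODE_OFFSET_FIRST_DW	16
#define MAX_FLEX_0_ID			3	/* DR_STE_MAX_FLEX_0_ID */

enum lu_type { LU_FLEX_PARSER_0, LU_FLEX_PARSER_1 };

static uint32_t pack_icmp_dw0(uint8_t type, uint8_t code)
{
	return ((uint32_t)type << ICMP_TYPE_OFFSET_FIRST_DW) |
	       ((uint32_t)code << ICMP_CODE_OFFSET_FIRST_DW);
}

static enum lu_type lu_for_parser(uint8_t parser_id)
{
	/* parsers 0-3 live in FLEX_PARSER_0, parsers 4-7 in FLEX_PARSER_1 */
	return parser_id > MAX_FLEX_0_ID ? LU_FLEX_PARSER_1 : LU_FLEX_PARSER_0;
}

int main(void)
{
	/* e.g. ICMP echo request (type 8, code 0) on parser dword 4 */
	printf("dw0=0x%08x lu_type=%d\n", pack_icmp_dw0(8, 0), lu_for_parser(4));
	return 0;
}

The header data goes into a second parser dword in the same way; because ids 4-7 select the FLEX_PARSER_1 lookup type, ICMPv4 and ICMPv6 can end up in different STEs depending on how the device enumerated its parsers.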
.build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init, .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init, + .build_tnl_mpls_over_udp_init = &dr_ste_v0_build_tnl_mpls_over_udp_init, + .build_tnl_mpls_over_gre_init = &dr_ste_v0_build_tnl_mpls_over_gre_init, .build_icmp_init = &dr_ste_v0_build_icmp_init, .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init, .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init, .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init, .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init, + .build_tnl_geneve_tlv_opt_init = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init, .build_register_0_init = &dr_ste_v0_build_register_0_init, .build_register_1_init = &dr_ste_v0_build_register_1_init, .build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init, + .build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init, + .build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init, + .build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init, + .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init, + .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init, /* Getters and Setters */ .ste_init = &dr_ste_v0_init, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 9143ec326ebf..054c2e2b6554 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -437,21 +437,6 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p, - u8 *s_action, - u16 decap_actions, - u32 decap_index) -{ - MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id, - DR_STE_V1_ACTION_ID_MODIFY_LIST); - MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions, - decap_actions); - MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr, - decap_index); - - dr_ste_v1_set_reparse(hw_ste_p); -} - static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, u8 *s_action, u16 num_of_actions, @@ -571,9 +556,6 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, bool allow_ctr = true; if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { - dr_ste_v1_set_rx_decap_l3(last_ste, action, - attr->decap_actions, - attr->decap_index); dr_ste_v1_set_rewrite_actions(last_ste, action, attr->decap_actions, attr->decap_index); @@ -1324,6 +1306,88 @@ static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag; } +static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + u8 *parser_ptr; + u8 parser_id; + u32 mpls_hdr; + + mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL; + misc2->outer_first_mpls_over_udp_label = 0; + mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP; + misc2->outer_first_mpls_over_udp_exp = 0; + mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS; + misc2->outer_first_mpls_over_udp_s_bos = 0; + mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL; + misc2->outer_first_mpls_over_udp_ttl = 0; + + parser_id = sb->caps->flex_parser_id_mpls_over_udp; + parser_ptr = 
dr_ste_calc_flex_parser_offset(tag, parser_id); + *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr); + + return 0; +} + +static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask); + + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ? + DR_STE_V1_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V1_LU_TYPE_FLEX_PARSER_0; + + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag; +} + +static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + u8 *parser_ptr; + u8 parser_id; + u32 mpls_hdr; + + mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL; + misc2->outer_first_mpls_over_gre_label = 0; + mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP; + misc2->outer_first_mpls_over_gre_exp = 0; + mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS; + misc2->outer_first_mpls_over_gre_s_bos = 0; + mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL; + misc2->outer_first_mpls_over_gre_ttl = 0; + + parser_id = sb->caps->flex_parser_id_mpls_over_gre; + parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); + *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr); + + return 0; +} + +static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask); + + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ? 
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V1_LU_TYPE_FLEX_PARSER_0; + + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag; +} + static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, u8 *tag) @@ -1355,16 +1419,14 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, return 0; } -static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask); sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O; sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag; - - return 0; } static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value, @@ -1532,6 +1594,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port); DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn); + misc_mask->source_eswitch_owner_vhca_id = 0; } static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, @@ -1588,6 +1651,179 @@ static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag; } +static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id, + u32 *misc4_field_value, + bool *parser_is_used, + u8 *tag) +{ + u32 id = *misc4_field_id; + u8 *parser_ptr; + + if (parser_is_used[id]) + return; + + parser_is_used[id] = true; + parser_ptr = dr_ste_calc_flex_parser_offset(tag, id); + + *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value); + *misc4_field_id = 0; + *misc4_field_value = 0; +} + +static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4; + bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {}; + + dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0, + &misc_4_mask->prog_sample_field_value_0, + parser_is_used, tag); + + dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1, + &misc_4_mask->prog_sample_field_value_1, + parser_is_used, tag); + + dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2, + &misc_4_mask->prog_sample_field_value_2, + parser_is_used, tag); + + dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3, + &misc_4_mask->prog_sample_field_value_3, + parser_is_used, tag); + + return 0; +} + +static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0; + dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag; +} + +static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1; + dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag; +} + +static int +dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct 
mlx5dr_match_misc3 *misc3 = &value->misc3; + u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0; + u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); + + MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3, + misc3->geneve_tlv_option_0_data); + misc3->geneve_tlv_option_0_data = 0; + + return 0; +} + +static void +dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask); + + /* STEs with lookup type FLEX_PARSER_{0/1} includes + * flex parsers_{0-3}/{4-7} respectively. + */ + sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ? + DR_STE_V1_LU_TYPE_FLEX_PARSER_1 : + DR_STE_V1_LU_TYPE_FLEX_PARSER_0; + + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag; +} + +static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + + DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags); + DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type); + DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid); + + return 0; +} + +static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag; +} + +static int +dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3); + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3); + if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3); + return 0; +} + +static void +dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag; +} + +static int +dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3); + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2)) + DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3); + if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0)) + 
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3); + return 0; +} + +static void +dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag; +} + struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, @@ -1602,14 +1838,23 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { .build_mpls_init = &dr_ste_v1_build_mpls_init, .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, + .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init, + .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init, .build_icmp_init = &dr_ste_v1_build_icmp_init, .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, + .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, .build_register_0_init = &dr_ste_v1_build_register_0_init, .build_register_1_init = &dr_ste_v1_build_register_1_init, .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, + .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, + .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, + .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, + .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, + .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, + /* Getters and Setters */ .ste_init = &dr_ste_v1_init, .set_next_lu_type = &dr_ste_v1_set_next_lu_type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index b599b6beb5b9..30ae3cda6d2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -29,7 +29,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, last_htbl = tbl->rx.s_anchor; tbl->rx.default_icm_addr = action ? - action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : + action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : tbl->rx.nic_dmn->default_icm_addr; info.type = CONNECT_MISS; @@ -53,7 +53,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, last_htbl = tbl->tx.s_anchor; tbl->tx.default_icm_addr = action ? 
- action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr : + action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr : tbl->tx.nic_dmn->default_icm_addr; info.type = CONNECT_MISS; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 4af0e4e6a13c..67460c42a99b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -12,17 +12,30 @@ #include "mlx5_ifc_dr.h" #include "mlx5dr.h" -#define DR_RULE_MAX_STES 17 +#define DR_RULE_MAX_STES 18 #define DR_ACTION_MAX_STES 5 #define WIRE_PORT 0xFFFF #define DR_STE_SVLAN 0x1 #define DR_STE_CVLAN 0x2 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) +#define DR_NUM_OF_FLEX_PARSERS 8 +#define DR_STE_MAX_FLEX_0_ID 3 +#define DR_STE_MAX_FLEX_1_ID 7 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg) #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg) #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg) +static inline bool dr_is_flex_parser_0_id(u8 parser_id) +{ + return parser_id <= DR_STE_MAX_FLEX_0_ID; +} + +static inline bool dr_is_flex_parser_1_id(u8 parser_id) +{ + return parser_id > DR_STE_MAX_FLEX_0_ID; +} + enum mlx5dr_icm_chunk_size { DR_CHUNK_SIZE_1, DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */ @@ -87,7 +100,8 @@ enum mlx5dr_matcher_criteria { DR_MATCHER_CRITERIA_INNER = 1 << 2, DR_MATCHER_CRITERIA_MISC2 = 1 << 3, DR_MATCHER_CRITERIA_MISC3 = 1 << 4, - DR_MATCHER_CRITERIA_MAX = 1 << 5, + DR_MATCHER_CRITERIA_MISC4 = 1 << 5, + DR_MATCHER_CRITERIA_MAX = 1 << 6, }; enum mlx5dr_action_type { @@ -389,11 +403,21 @@ void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, - struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - bool inner, bool rx); +void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -402,6 +426,25 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); +void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, + struct 
mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -419,6 +462,14 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_match_param *mask, struct mlx5dr_domain *dmn, bool inner, bool rx); +void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx); /* Actions utils */ @@ -646,7 +697,24 @@ struct mlx5dr_match_misc3 { u8 icmpv6_type; u8 icmpv4_code; u8 icmpv4_type; - u8 reserved_auto3[0x1c]; + u32 geneve_tlv_option_0_data; + u8 gtpu_msg_flags; + u8 gtpu_msg_type; + u32 gtpu_teid; + u32 gtpu_dw_2; + u32 gtpu_first_ext_dw_0; + u32 gtpu_dw_0; +}; + +struct mlx5dr_match_misc4 { + u32 prog_sample_field_value_0; + u32 prog_sample_field_id_0; + u32 prog_sample_field_value_1; + u32 prog_sample_field_id_1; + u32 prog_sample_field_value_2; + u32 prog_sample_field_id_2; + u32 prog_sample_field_value_3; + u32 prog_sample_field_id_3; }; struct mlx5dr_match_param { @@ -655,6 +723,7 @@ struct mlx5dr_match_param { struct mlx5dr_match_spec inner; struct mlx5dr_match_misc2 misc2; struct mlx5dr_match_misc3 misc3; + struct mlx5dr_match_misc4 misc4; }; #define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ @@ -678,6 +747,12 @@ struct mlx5dr_cmd_vport_cap { u32 num; }; +struct mlx5dr_roce_cap { + u8 roce_en:1; + u8 fl_rc_qp_when_roce_disabled:1; + u8 fl_rc_qp_when_roce_enabled:1; +}; + struct mlx5dr_cmd_caps { u16 gvmi; u64 nic_rx_drop_address; @@ -692,6 +767,13 @@ struct mlx5dr_cmd_caps { u8 flex_parser_id_icmp_dw1; u8 flex_parser_id_icmpv6_dw0; u8 flex_parser_id_icmpv6_dw1; + u8 flex_parser_id_geneve_tlv_option_0; + u8 flex_parser_id_mpls_over_gre; + u8 flex_parser_id_mpls_over_udp; + u8 flex_parser_id_gtpu_dw_0; + u8 flex_parser_id_gtpu_teid; + u8 flex_parser_id_gtpu_dw_2; + u8 flex_parser_id_gtpu_first_ext_dw_0; u8 max_ft_level; u16 roce_min_src_udp; u8 num_esw_ports; @@ -707,6 +789,8 @@ struct mlx5dr_cmd_caps { struct mlx5dr_esw_caps esw_caps; struct mlx5dr_cmd_vport_cap *vports_caps; bool prio_tag_required; + struct mlx5dr_roce_cap roce_caps; + u8 isolate_vl_tc:1; }; struct mlx5dr_domain_rx_tx { @@ -806,53 +890,71 @@ struct mlx5dr_ste_action_modify_field { u8 l4_type; }; +struct mlx5dr_action_rewrite { + struct mlx5dr_domain *dmn; + struct mlx5dr_icm_chunk *chunk; + u8 *data; + u16 num_of_actions; + u32 index; + u8 allow_rx:1; + u8 allow_tx:1; + u8 modify_ttl:1; +}; + +struct mlx5dr_action_reformat { + struct mlx5dr_domain *dmn; + u32 reformat_id; + u32 reformat_size; +}; + +struct mlx5dr_action_dest_tbl { + u8 is_fw_tbl:1; + union { + struct mlx5dr_table *tbl; + struct { + struct mlx5dr_domain *dmn; + u32 id; + u32 group_id; + enum fs_flow_table_type type; + u64 rx_icm_addr; + u64 tx_icm_addr; + struct mlx5dr_action **ref_actions; + u32 num_of_ref_actions; + } fw_tbl; + }; +}; + +struct mlx5dr_action_ctr { + u32 ctr_id; + u32 offeset; +}; + +struct mlx5dr_action_vport { + struct mlx5dr_domain *dmn; + struct mlx5dr_cmd_vport_cap *caps; +}; + +struct mlx5dr_action_push_vlan { + u32 vlan_hdr; /* tpid_pcp_dei_vid */ +}; + +struct 
mlx5dr_action_flow_tag { + u32 flow_tag; +}; + struct mlx5dr_action { enum mlx5dr_action_type action_type; refcount_t refcount; + union { - struct { - struct mlx5dr_domain *dmn; - struct mlx5dr_icm_chunk *chunk; - u8 *data; - u16 num_of_actions; - u32 index; - u8 allow_rx:1; - u8 allow_tx:1; - u8 modify_ttl:1; - } rewrite; - struct { - struct mlx5dr_domain *dmn; - u32 reformat_id; - u32 reformat_size; - } reformat; - struct { - u8 is_fw_tbl:1; - union { - struct mlx5dr_table *tbl; - struct { - struct mlx5dr_domain *dmn; - u32 id; - u32 group_id; - enum fs_flow_table_type type; - u64 rx_icm_addr; - u64 tx_icm_addr; - struct mlx5dr_action **ref_actions; - u32 num_of_ref_actions; - } fw_tbl; - }; - } dest_tbl; - struct { - u32 ctr_id; - u32 offeset; - } ctr; - struct { - struct mlx5dr_domain *dmn; - struct mlx5dr_cmd_vport_cap *caps; - } vport; - struct { - u32 vlan_hdr; /* tpid_pcp_dei_vid */ - } push_vlan; - u32 flow_tag; + void *data; + struct mlx5dr_action_rewrite *rewrite; + struct mlx5dr_action_reformat *reformat; + struct mlx5dr_action_dest_tbl *dest_tbl; + struct mlx5dr_action_ctr *ctr; + struct mlx5dr_action_vport *vport; + struct mlx5dr_action_push_vlan *push_vlan; + struct mlx5dr_action_flow_tag *flow_tag; }; }; @@ -1063,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr { u32 sq_wqe_cnt; u32 rq_wqe_cnt; u32 rq_wqe_shift; + u8 isolate_vl_tc:1; }; int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index 83df6df6b459..9643ee647f57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -434,10 +434,7 @@ struct mlx5_ifc_ste_gre_bits { }; struct mlx5_ifc_ste_flex_parser_0_bits { - u8 parser_3_label[0x14]; - u8 parser_3_exp[0x3]; - u8 parser_3_s_bos[0x1]; - u8 parser_3_ttl[0x8]; + u8 flex_parser_3[0x20]; u8 flex_parser_2[0x20]; @@ -488,6 +485,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits { + u8 reserved_at_0[0x5]; + u8 gtpu_msg_flags[0x3]; + u8 gtpu_msg_type[0x8]; + u8 reserved_at_10[0x10]; + + u8 gtpu_teid[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_ste_general_purpose_bits { u8 general_purpose_lookup_field[0x20]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index e05c5c0f3ae1..457ad42eaa2a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); -/** - * mlx5_eswitch_get_total_vports - Get total vports of the eswitch - * - * @dev: Pointer to core device - * - * mlx5_eswitch_get_total_vports returns total number of vports for - * the eswitch. 
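With the mlx5_ifc_dr.h hunk collapsing parser_3_label/exp/s_bos/ttl into a single flex_parser_3 dword, the v0 and v1 MPLS-over-GRE/UDP builders assemble the MPLS word in software and drop it at that parser's dword within the STE tag (four parser dwords per lookup, per the offset rule added in dr_ste.h). A small self-contained sketch of that packing, assuming only the HDR_MPLS_OFFSET_* values and the 4 * (3 - id % 4) offset rule; htonl() plus memcpy() stand in for cpu_to_be32():

/* Illustrative only, not the driver's code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

enum {
	HDR_MPLS_OFFSET_LABEL = 12,
	HDR_MPLS_OFFSET_EXP   = 9,
	HDR_MPLS_OFFSET_S_BOS = 8,
	HDR_MPLS_OFFSET_TTL   = 0,
};

/* Parser ids 0-3 (and 4-7) map to the four tag dwords in reverse order. */
static uint8_t *calc_flex_parser_offset(uint8_t *tag, uint8_t parser_id)
{
	return tag + 4 * (3 - (parser_id % 4));
}

static void set_mpls_word(uint8_t *tag, uint8_t parser_id, uint32_t label,
			  uint8_t exp, uint8_t s_bos, uint8_t ttl)
{
	uint32_t mpls_hdr, be;

	mpls_hdr  = label << HDR_MPLS_OFFSET_LABEL;
	mpls_hdr |= (uint32_t)exp << HDR_MPLS_OFFSET_EXP;
	mpls_hdr |= (uint32_t)s_bos << HDR_MPLS_OFFSET_S_BOS;
	mpls_hdr |= (uint32_t)ttl << HDR_MPLS_OFFSET_TTL;

	be = htonl(mpls_hdr);
	memcpy(calc_flex_parser_offset(tag, parser_id), &be, sizeof(be));
}

int main(void)
{
	uint8_t tag[16] = {};
	unsigned int i;

	/* label 100, exp 0, bottom-of-stack, ttl 64 via parser id 2 */
	set_mpls_word(tag, 2, 100, 0, 1, 64);
	for (i = 0; i < sizeof(tag); i++)
		printf("%02x%s", tag[i], (i % 4) == 3 ? "\n" : " ");
	return 0;
}

The same offset helper is what lets the misc4 programmable-sample builders and the GTP-U flex parser builders share one tag layout regardless of which parser id the capability query reports.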
- */ -u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) -{ - return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev); -} -EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); - int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out) { u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 01f075fac276..3091dd014650 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -34,11 +34,6 @@ #include "wq.h" #include "mlx5_core.h" -static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) -{ - return ((u32)1 << log_sz) << log_stride; -} - int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 52fdc34251ba..7e9a7cb31720 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1728,7 +1728,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor return err; event_id = mlxsw_reg_mfde_event_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id); + err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id); if (err) return err; switch (event_id) { @@ -1806,6 +1806,10 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); if (err) return err; + val = mlxsw_reg_mfde_log_ip_get(mfde_pl); + err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); + if (err) + return err; } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) { val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl); err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 8af7d9d03475..80712dc803d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -58,6 +58,25 @@ struct mlxsw_tx_info { bool is_emad; }; +struct mlxsw_rx_md_info { + u32 cookie_index; + u32 latency; + u32 tx_congestion; + union { + /* Valid when 'tx_port_valid' is set. */ + u16 tx_sys_port; + u16 tx_lag_id; + }; + u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. 
*/ + u8 tx_tc; + u8 latency_valid:1, + tx_congestion_valid:1, + tx_tc_valid:1, + tx_port_valid:1, + tx_port_is_lag:1, + unused:3; +}; + bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info); int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, @@ -515,7 +534,7 @@ enum mlxsw_devlink_param_id { struct mlxsw_skb_cb { union { struct mlxsw_tx_info tx_info; - u32 cookie_index; /* Only used during receive */ + struct mlxsw_rx_md_info rx_md_info; }; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 4d699fe98cb6..78d9c0196f2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -2007,3 +2007,134 @@ int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_l4port); + +/* Mirror Sampler Action + * --------------------- + * The SAMPLER_ACTION is used to mirror packets with a probability (sampling). + */ + +#define MLXSW_AFA_SAMPLER_CODE 0x13 +#define MLXSW_AFA_SAMPLER_SIZE 1 + +/* afa_sampler_mirror_agent + * Mirror (SPAN) agent. + */ +MLXSW_ITEM32(afa, sampler, mirror_agent, 0x04, 0, 3); + +#define MLXSW_AFA_SAMPLER_RATE_MAX (BIT(24) - 1) + +/* afa_sampler_mirror_probability_rate + * Mirroring probability. + * Valid values are 1 to 2^24 - 1 + */ +MLXSW_ITEM32(afa, sampler, mirror_probability_rate, 0x08, 0, 24); + +static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate) +{ + mlxsw_afa_sampler_mirror_agent_set(payload, mirror_agent); + mlxsw_afa_sampler_mirror_probability_rate_set(payload, rate); +} + +struct mlxsw_afa_sampler { + struct mlxsw_afa_resource resource; + int span_id; + u8 local_port; + bool ingress; +}; + +static void mlxsw_afa_sampler_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_sampler *sampler) +{ + mlxsw_afa_resource_del(&sampler->resource); + block->afa->ops->sampler_del(block->afa->ops_priv, sampler->local_port, + sampler->span_id, sampler->ingress); + kfree(sampler); +} + +static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + struct mlxsw_afa_sampler *sampler; + + sampler = container_of(resource, struct mlxsw_afa_sampler, resource); + mlxsw_afa_sampler_destroy(block, sampler); +} + +static struct mlxsw_afa_sampler * +mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, bool ingress, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_sampler *sampler; + int err; + + sampler = kzalloc(sizeof(*sampler), GFP_KERNEL); + if (!sampler) + return ERR_PTR(-ENOMEM); + + err = block->afa->ops->sampler_add(block->afa->ops_priv, local_port, + psample_group, rate, trunc_size, + truncate, ingress, &sampler->span_id, + extack); + if (err) + goto err_sampler_add; + + sampler->ingress = ingress; + sampler->local_port = local_port; + sampler->resource.destructor = mlxsw_afa_sampler_destructor; + mlxsw_afa_resource_add(block, &sampler->resource); + return sampler; + +err_sampler_add: + kfree(sampler); + return ERR_PTR(err); +} + +static int +mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block, + u8 mirror_agent, u32 rate) +{ + char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_SAMPLER_CODE, + MLXSW_AFA_SAMPLER_SIZE); + + if (IS_ERR(act)) + return 
PTR_ERR(act); + mlxsw_afa_sampler_pack(act, mirror_agent, rate); + return 0; +} + +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_sampler *sampler; + int err; + + if (rate > MLXSW_AFA_SAMPLER_RATE_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Sampling rate is too high"); + return -EINVAL; + } + + sampler = mlxsw_afa_sampler_create(block, local_port, psample_group, + rate, trunc_size, truncate, ingress, + extack); + if (IS_ERR(sampler)) + return PTR_ERR(sampler); + + err = mlxsw_afa_block_append_allocated_sampler(block, sampler->span_id, + rate); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append sampler action"); + goto err_append_allocated_sampler; + } + + return 0; + +err_append_allocated_sampler: + mlxsw_afa_sampler_destroy(block, sampler); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_sampler); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index b652497b1002..b65bf98eb5ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -30,6 +30,12 @@ struct mlxsw_afa_ops { u16 *p_policer_index, struct netlink_ext_ack *extack); void (*policer_del)(void *priv, u16 policer_index); + int (*sampler_add)(void *priv, u8 local_port, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, bool ingress, + int *p_span_id, struct netlink_ext_ack *extack); + void (*sampler_del)(void *priv, u8 local_port, int span_id, + bool ingress); bool dummy_first_set; }; @@ -92,5 +98,10 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block, u32 fa_index, u64 rate_bytes_ps, u32 burst, u16 *p_policer_index, struct netlink_ext_ack *extack); +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index bf85ce9835d7..37fb2e1fb278 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -19,7 +19,6 @@ #define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */ #define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */ #define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */ -#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */ #define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ #define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) #define MLXSW_THERMAL_ZONE_MAX_NAME 16 @@ -45,7 +44,6 @@ enum mlxsw_thermal_trips { MLXSW_THERMAL_TEMP_TRIP_NORM, MLXSW_THERMAL_TEMP_TRIP_HIGH, MLXSW_THERMAL_TEMP_TRIP_HOT, - MLXSW_THERMAL_TEMP_TRIP_CRIT, }; struct mlxsw_thermal_trip { @@ -75,16 +73,9 @@ static const struct mlxsw_thermal_trip default_thermal_trips[] = { { /* Warning */ .type = THERMAL_TRIP_HOT, .temp = MLXSW_THERMAL_ASIC_TEMP_HOT, - .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, .min_state = MLXSW_THERMAL_MAX_STATE, .max_state = MLXSW_THERMAL_MAX_STATE, }, - { /* Critical - soft poweroff */ - .type = THERMAL_TRIP_CRITICAL, - .temp = MLXSW_THERMAL_ASIC_TEMP_CRIT, - .min_state = MLXSW_THERMAL_MAX_STATE, - .max_state = MLXSW_THERMAL_MAX_STATE, - } }; #define MLXSW_THERMAL_NUM_TRIPS 
ARRAY_SIZE(default_thermal_trips) @@ -154,7 +145,6 @@ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz) tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0; tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0; tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0; - tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = 0; } static int @@ -183,11 +173,10 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, } /* According to the system thermal requirements, the thermal zones are - * defined with four trip points. The critical and emergency + * defined with three trip points. The critical and emergency * temperature thresholds, provided by QSFP module are set as "active" - * and "hot" trip points, "normal" and "critical" trip points are - * derived from "active" and "hot" by subtracting or adding double - * hysteresis value. + * and "hot" trip points, "normal" trip point is derived from "active" + * by subtracting double hysteresis value. */ if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT) tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp - @@ -196,8 +185,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp; tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp; tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp; - tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp + - MLXSW_THERMAL_MODULE_TEMP_SHIFT; return 0; } @@ -210,7 +197,7 @@ static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal, struct mlxsw_thermal_trip *trip = trips; unsigned int score, delta, i, shift = 1; - /* Calculate thermal zone score, if temperature is above the critical + /* Calculate thermal zone score, if temperature is above the hot * threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX. 
*/ score = MLXSW_THERMAL_TEMP_SCORE_MAX; @@ -333,8 +320,7 @@ static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev, { struct mlxsw_thermal *thermal = tzdev->devdata; - if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || - temp > MLXSW_THERMAL_ASIC_TEMP_CRIT) + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) return -EINVAL; thermal->trips[trip].temp = temp; @@ -502,8 +488,7 @@ mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev, { struct mlxsw_thermal_module *tz = tzdev->devdata; - if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || - temp > tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp) + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) return -EINVAL; tz->trips[trip].temp = temp; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index d0052537e627..8e8456811384 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -540,6 +540,55 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, spin_unlock(&q->lock); } +static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb, + const char *cqe) +{ + struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb); + + if (mlxsw_pci_cqe2_tx_lag_get(cqe)) { + cb->rx_md_info.tx_port_is_lag = true; + cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe); + cb->rx_md_info.tx_lag_port_index = + mlxsw_pci_cqe2_tx_lag_subport_get(cqe); + } else { + cb->rx_md_info.tx_port_is_lag = false; + cb->rx_md_info.tx_sys_port = + mlxsw_pci_cqe2_tx_system_port_get(cqe); + } + + if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT && + cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID) + cb->rx_md_info.tx_port_valid = 1; + else + cb->rx_md_info.tx_port_valid = 0; +} + +static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe) +{ + struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb); + + cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe); + if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID) + cb->rx_md_info.tx_congestion_valid = 1; + else + cb->rx_md_info.tx_congestion_valid = 0; + cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT; + + cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe); + if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID) + cb->rx_md_info.latency_valid = 1; + else + cb->rx_md_info.latency_valid = 0; + + cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe); + if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID) + cb->rx_md_info.tx_tc_valid = 1; + else + cb->rx_md_info.tx_tc_valid = 0; + + mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe); +} + static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, @@ -581,11 +630,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe); - mlxsw_skb_cb(skb)->cookie_index = cookie_index; + mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index; } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 && rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 && mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe); + mlxsw_pci_cqe_rdq_md_init(skb, cqe); + } else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE && + mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { + mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe); } 
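/* The congestion level reported in the CQE is in units of 8KB; mlxsw_pci_cqe_rdq_md_init() above therefore shifts the value reconstructed by mlxsw_pci_cqe2_mirror_cong_get() left by MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT (13) so that tx_congestion is expressed in bytes, e.g. a raw value of 3 corresponds to 3 * 8192 = 24576 bytes. */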
byte_count = mlxsw_pci_cqe_byte_count_get(cqe); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index a2c1fbd3e0d1..7b531228d6c0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -173,6 +173,15 @@ MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16); */ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); +#define MLXSW_PCI_CQE2_MIRROR_CONG_INVALID 0xFFFF + +/* pci_cqe_mirror_cong_high + * Congestion level in units of 8KB of the egress traffic class of the original + * packet that does mirroring to the CPU. Value of 0xFFFF means that the + * congestion level is invalid. + */ +MLXSW_ITEM32(pci, cqe2, mirror_cong_high, 0x08, 16, 4); + /* pci_cqe_trap_id * Trap ID that captured the packet. */ @@ -208,6 +217,59 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5); MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6); mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12); +#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F + +/* pci_cqe_mirror_tclass + * The egress traffic class of the original packet that does mirroring to the + * CPU. Value of 0x1F means that the traffic class is invalid. + */ +MLXSW_ITEM32(pci, cqe2, mirror_tclass, 0x10, 27, 5); + +/* pci_cqe_tx_lag + * The Tx port of a packet that is mirrored / sampled to the CPU is a LAG. + */ +MLXSW_ITEM32(pci, cqe2, tx_lag, 0x10, 24, 1); + +/* pci_cqe_tx_lag_subport + * The port index within the LAG of a packet that is mirrored / sampled to the + * CPU. Reserved when tx_lag is 0. + */ +MLXSW_ITEM32(pci, cqe2, tx_lag_subport, 0x10, 16, 8); + +#define MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT 0xFFFE +#define MLXSW_PCI_CQE2_TX_PORT_INVALID 0xFFFF + +/* pci_cqe_tx_lag_id + * The Tx LAG ID of the original packet that is mirrored / sampled to the CPU. + * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx LAG ID + * is invalid. Reserved when tx_lag is 0. + */ +MLXSW_ITEM32(pci, cqe2, tx_lag_id, 0x10, 0, 16); + +/* pci_cqe_tx_system_port + * The Tx port of the original packet that is mirrored / sampled to the CPU. + * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx port is + * invalid. Reserved when tx_lag is 1. + */ +MLXSW_ITEM32(pci, cqe2, tx_system_port, 0x10, 0, 16); + +/* pci_cqe_mirror_cong_low + * Congestion level in units of 8KB of the egress traffic class of the original + * packet that does mirroring to the CPU. Value of 0xFFFF means that the + * congestion level is invalid. + */ +MLXSW_ITEM32(pci, cqe2, mirror_cong_low, 0x14, 20, 12); + +#define MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT 13 /* Units of 8KB. */ + +static inline u16 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe) +{ + u16 cong_high = mlxsw_pci_cqe2_mirror_cong_high_get(cqe); + u16 cong_low = mlxsw_pci_cqe2_mirror_cong_low_get(cqe); + + return cong_high << 12 | cong_low; +} + /* pci_cqe_user_def_val_orig_pkt_len * When trap_id is an ACL: User defined value from policy engine action. */ @@ -218,6 +280,15 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20); */ MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8); +#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF + +/* pci_cqe_mirror_latency + * End-to-end latency of the original packet that does mirroring to the CPU. + * Value of 0xFFFFFF means that the latency is invalid. Units are according to + * MOGCR.mirror_latency_units. + */ +MLXSW_ITEM32(pci, cqe2, mirror_latency, 0x1C, 8, 24); + /* pci_cqe_owner * Ownership bit. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c4adc7f740d3..900b4bf5bb5b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -842,6 +842,14 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); +/* reg_spvid_egr_et_set + * When VLAN is pushed at ingress (for untagged packets or for + * QinQ push mode) then the EtherType is decided at the egress port. + * Reserved when Spectrum-1. + * Access: RW + */ +MLXSW_ITEM32(reg, spvid, egr_et_set, 0x04, 24, 1); + /* reg_spvid_et_vlan * EtherType used for when VLAN is pushed at ingress (for untagged * packets or for QinQ push mode). @@ -849,6 +857,7 @@ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); * 1: ether_type1 * 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2 * Ethertype IDs are configured by SVER. + * Reserved when egr_et_set = 1. * Access: RW */ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2); @@ -2079,6 +2088,41 @@ static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1, mlxsw_reg_spvc_et0_set(payload, et0); } +/* SPEVET - Switch Port Egress VLAN EtherType + * ------------------------------------------ + * The switch port egress VLAN EtherType configures which EtherType to push at + * egress for packets incoming through a local port for which 'SPVID.egr_et_set' + * is set. + */ +#define MLXSW_REG_SPEVET_ID 0x202A +#define MLXSW_REG_SPEVET_LEN 0x08 + +MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN); + +/* reg_spevet_local_port + * Egress Local port number. + * Not supported to CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8); + +/* reg_spevet_et_vlan + * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet: + * 0: ether_type0 - (default) + * 1: ether_type1 + * 2: ether_type2 + * Access: RW + */ +MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2); + +static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port, + u8 et_vlan) +{ + MLXSW_REG_ZERO(spevet, payload); + mlxsw_reg_spevet_local_port_set(payload, local_port); + mlxsw_reg_spevet_et_vlan_set(payload, et_vlan); +} + /* CWTP - Congetion WRED ECN TClass Profile * ---------------------------------------- * Configures the profiles for queues of egress port and traffic class @@ -5637,7 +5681,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN); -/* reg_slot_index +/* reg_pmaos_slot_index * Slot index. * Access: Index */ @@ -8086,6 +8130,60 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +/* RATRAD - Router Adjacency Table Activity Dump Register + * ------------------------------------------------------ + * The RATRAD register is used to dump and optionally clear activity bits of + * router adjacency table entries. + */ +#define MLXSW_REG_RATRAD_ID 0x8022 +#define MLXSW_REG_RATRAD_LEN 0x210 + +MLXSW_REG_DEFINE(ratrad, MLXSW_REG_RATRAD_ID, MLXSW_REG_RATRAD_LEN); + +enum { + /* Read activity */ + MLXSW_REG_RATRAD_OP_READ_ACTIVITY, + /* Read and clear activity */ + MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY, +}; + +/* reg_ratrad_op + * Access: Operation + */ +MLXSW_ITEM32(reg, ratrad, op, 0x00, 30, 2); + +/* reg_ratrad_ecmp_size + * ecmp_size is the amount of sequential entries from adjacency_index. 
Valid + * ranges: + * Spectrum-1: 32-64, 512, 1024, 2048, 4096 + * Spectrum-2/3: 32-128, 256, 512, 1024, 2048, 4096 + * Access: Index + */ +MLXSW_ITEM32(reg, ratrad, ecmp_size, 0x00, 0, 13); + +/* reg_ratrad_adjacency_index + * Index into the adjacency table. + * Access: Index + */ +MLXSW_ITEM32(reg, ratrad, adjacency_index, 0x04, 0, 24); + +/* reg_ratrad_activity_vector + * Activity bit per adjacency index. + * Bits higher than ecmp_size are reserved. + * Access: RO + */ +MLXSW_ITEM_BIT_ARRAY(reg, ratrad, activity_vector, 0x10, 0x200, 1); + +static inline void mlxsw_reg_ratrad_pack(char *payload, u32 adjacency_index, + u16 ecmp_size) +{ + MLXSW_REG_ZERO(ratrad, payload); + mlxsw_reg_ratrad_op_set(payload, + MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY); + mlxsw_reg_ratrad_ecmp_size_set(payload, ecmp_size); + mlxsw_reg_ratrad_adjacency_index_set(payload, adjacency_index); +} + /* RIGR-V2 - Router Interface Group Register Version 2 * --------------------------------------------------- * The RIGR_V2 register is used to add, remove and query egress interface list @@ -9925,15 +10023,28 @@ MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1); */ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); +#define MLXSW_REG_MPAR_RATE_MAX 3500000000UL + +/* reg_mpar_probability_rate + * Sampling rate. + * Valid values are: 1 to 3.5*10^9 + * Value of 1 means "sample all". Default is 1. + * Reserved when Spectrum-1. + * Access: RW + */ +MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32); + static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, enum mlxsw_reg_mpar_i_e i_e, - bool enable, u8 pa_id) + bool enable, u8 pa_id, + u32 probability_rate) { MLXSW_REG_ZERO(mpar, payload); mlxsw_reg_mpar_local_port_set(payload, local_port); mlxsw_reg_mpar_enable_set(payload, enable); mlxsw_reg_mpar_i_e_set(payload, i_e); mlxsw_reg_mpar_pa_id_set(payload, pa_id); + mlxsw_reg_mpar_probability_rate_set(payload, probability_rate); } /* MGIR - Management General Information Register @@ -10577,6 +10688,8 @@ MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4); */ MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4); +#define MLXSW_REG_MPAGR_RATE_MAX 3500000000UL + /* reg_mpagr_probability_rate * Sampling rate. * Valid values are: 1 to 3.5*10^9 @@ -10919,7 +11032,7 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN); * Which irisc triggered the event * Access: RO */ -MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4); +MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8); enum mlxsw_reg_mfde_event_id { MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1, @@ -10930,7 +11043,7 @@ enum mlxsw_reg_mfde_event_id { /* reg_mfde_event_id * Access: RO */ -MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8); +MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 16); enum mlxsw_reg_mfde_method { MLXSW_REG_MFDE_METHOD_QUERY, @@ -10979,6 +11092,13 @@ MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32); */ MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4); +/* reg_mfde_log_ip + * IP (instruction pointer) that triggered the timeout. + * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO + * Access: RO + */ +MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64); + /* reg_mfde_pipes_mask * Bit per kvh pipe. 
* Access: RO @@ -11995,6 +12115,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(sfmr), MLXSW_REG(spvmlr), MLXSW_REG(spvc), + MLXSW_REG(spevet), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), MLXSW_REG(pgcr), @@ -12047,6 +12168,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), + MLXSW_REG(ratrad), MLXSW_REG(rdpm), MLXSW_REG(ricnt), MLXSW_REG(rrcr), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1650d9852b5b..bca0354482cb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -23,6 +23,8 @@ #include <linux/netlink.h> #include <linux/jhash.h> #include <linux/log2.h> +#include <linux/refcount.h> +#include <linux/rhashtable.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/netevent.h> @@ -45,7 +47,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 2018 +#define MLXSW_SP1_FWREV_SUBMINOR 2406 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -62,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { #define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 2018 +#define MLXSW_SP2_FWREV_SUBMINOR 2406 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -77,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { #define MLXSW_SP3_FWREV_MAJOR 30 #define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 2018 +#define MLXSW_SP3_FWREV_SUBMINOR 2406 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -400,6 +402,22 @@ int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) return 0; } +int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 ethtype) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spevet_pl[MLXSW_REG_SPEVET_LEN]; + u8 sver_type; + int err; + + err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); + if (err) + return err; + + mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl); +} + static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u16 ethtype) { @@ -2212,32 +2230,6 @@ void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); } -void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port) -{ - struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; - struct mlxsw_sp_port_sample *sample; - u32 size; - - if (unlikely(!mlxsw_sp_port)) { - dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", - local_port); - goto out; - } - - rcu_read_lock(); - sample = rcu_dereference(mlxsw_sp_port->sample); - if (!sample) - goto out_unlock; - size = sample->truncate ? 
sample->trunc_size : skb->len; - psample_sample_packet(sample->psample_group, skb, size, - mlxsw_sp_port->dev->ifindex, 0, sample->rate); -out_unlock: - rcu_read_unlock(); -out: - consume_skb(skb); -} - #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) @@ -2576,6 +2568,147 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { .get_stats = mlxsw_sp2_get_stats, }; +struct mlxsw_sp_sample_trigger_node { + struct mlxsw_sp_sample_trigger trigger; + struct mlxsw_sp_sample_params params; + struct rhash_head ht_node; + struct rcu_head rcu; + refcount_t refcount; +}; + +static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), + .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), + .key_len = sizeof(struct mlxsw_sp_sample_trigger), + .automatic_shrinking = true, +}; + +static void +mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, + const struct mlxsw_sp_sample_trigger *trigger) +{ + memset(key, 0, sizeof(*key)); + key->type = trigger->type; + key->local_port = trigger->local_port; +} + +/* RCU read lock must be held */ +struct mlxsw_sp_sample_params * +mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + struct mlxsw_sp_sample_trigger key; + + mlxsw_sp_sample_trigger_key_init(&key, trigger); + trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, + mlxsw_sp_sample_trigger_ht_params); + if (!trigger_node) + return NULL; + + return &trigger_node->params; +} + +static int +mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger, + const struct mlxsw_sp_sample_params *params) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + int err; + + trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); + if (!trigger_node) + return -ENOMEM; + + trigger_node->trigger = *trigger; + trigger_node->params = *params; + refcount_set(&trigger_node->refcount, 1); + + err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, + &trigger_node->ht_node, + mlxsw_sp_sample_trigger_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + kfree(trigger_node); + return err; +} + +static void +mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_sample_trigger_node *trigger_node) +{ + rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, + &trigger_node->ht_node, + mlxsw_sp_sample_trigger_ht_params); + kfree_rcu(trigger_node, rcu); +} + +int +mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger, + const struct mlxsw_sp_sample_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + struct mlxsw_sp_sample_trigger key; + + ASSERT_RTNL(); + + mlxsw_sp_sample_trigger_key_init(&key, trigger); + + trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, + &key, + mlxsw_sp_sample_trigger_ht_params); + if (!trigger_node) + return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, + params); + + if (trigger_node->trigger.local_port) { + NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); + return -EINVAL; + } + + if (trigger_node->params.psample_group != params->psample_group || + 
trigger_node->params.truncate != params->truncate || + trigger_node->params.rate != params->rate || + trigger_node->params.trunc_size != params->trunc_size) { + NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); + return -EINVAL; + } + + refcount_inc(&trigger_node->refcount); + + return 0; +} + +void +mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + struct mlxsw_sp_sample_trigger key; + + ASSERT_RTNL(); + + mlxsw_sp_sample_trigger_key_init(&key, trigger); + + trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, + &key, + mlxsw_sp_sample_trigger_ht_params); + if (!trigger_node) + return; + + if (!refcount_dec_and_test(&trigger_node->refcount)) + return; + + mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); +} + static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr); @@ -2730,6 +2863,13 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_port_module_info_init; } + err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, + &mlxsw_sp_sample_trigger_ht_params); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); + goto err_sample_trigger_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -2739,6 +2879,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); +err_sample_trigger_init: mlxsw_sp_port_module_info_fini(mlxsw_sp); err_port_module_info_init: mlxsw_sp_dpipe_fini(mlxsw_sp); @@ -2788,6 +2930,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; @@ -2796,7 +2939,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; - mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; @@ -2804,6 +2946,8 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; mlxsw_sp->listeners = mlxsw_sp1_listener; mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; @@ -2817,6 +2961,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; @@ -2825,7 +2970,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; - mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; @@ -2833,6 +2977,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -2844,6 +2990,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; @@ -2852,7 +2999,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; - mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; @@ -2860,6 +3006,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -2870,6 +3018,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); mlxsw_sp_port_module_info_fini(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), @@ -4283,7 +4432,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, if (br_vlan_enabled(br_dev)) { br_vlan_get_proto(br_dev, &proto); if (proto == ETH_P_8021AD) { - NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge"); + NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ba28ac7e79bc..f99db88ee884 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -16,6 +16,7 @@ #include <linux/in6.h> #include <linux/notifier.h> #include <linux/net_namespace.h> +#include <linux/spinlock.h> #include <net/psample.h> #include <net/pkt_cls.h> #include <net/red.h> @@ -87,10 +88,15 @@ enum mlxsw_sp_rif_type { MLXSW_SP_RIF_TYPE_MAX, }; -struct mlxsw_sp_rif_ops; +struct mlxsw_sp_router_ops; -extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[]; -extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[]; +extern const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops; +extern const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops; + +struct mlxsw_sp_switchdev_ops; + +extern const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops; +extern const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops; enum mlxsw_sp_fid_type { 
MLXSW_SP_FID_TYPE_8021Q, @@ -134,6 +140,7 @@ struct mlxsw_sp_ptp_state; struct mlxsw_sp_ptp_ops; struct mlxsw_sp_span_ops; struct mlxsw_sp_qdisc_state; +struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; @@ -149,6 +156,7 @@ struct mlxsw_sp { const unsigned char *mac_mask; struct mlxsw_sp_upper *lags; struct mlxsw_sp_port_mapping **port_mapping; + struct rhashtable sample_trigger_ht; struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; struct mlxsw_sp_router *router; @@ -165,6 +173,7 @@ struct mlxsw_sp { struct mlxsw_sp_counter_pool *counter_pool; struct mlxsw_sp_span *span; struct mlxsw_sp_trap *trap; + const struct mlxsw_sp_switchdev_ops *switchdev_ops; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; const struct mlxsw_afk_ops *afk_ops; @@ -172,7 +181,6 @@ struct mlxsw_sp { const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; const struct mlxsw_sp_nve_ops **nve_ops_arr; - const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_sb_vals *sb_vals; const struct mlxsw_sp_sb_ops *sb_ops; const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; @@ -180,6 +188,8 @@ struct mlxsw_sp { const struct mlxsw_sp_span_ops *span_ops; const struct mlxsw_sp_policer_core_ops *policer_core_ops; const struct mlxsw_sp_trap_ops *trap_ops; + const struct mlxsw_sp_mall_ops *mall_ops; + const struct mlxsw_sp_router_ops *router_ops; const struct mlxsw_listener *listeners; size_t listeners_count; u32 lowest_shaper_bs; @@ -233,7 +243,18 @@ struct mlxsw_sp_port_pcpu_stats { u32 tx_dropped; }; -struct mlxsw_sp_port_sample { +enum mlxsw_sp_sample_trigger_type { + MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS, + MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS, + MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, +}; + +struct mlxsw_sp_sample_trigger { + enum mlxsw_sp_sample_trigger_type type; + u8 local_port; /* Reserved when trigger type is not ingress / egress. 
*/ +}; + +struct mlxsw_sp_sample_params { struct psample_group *psample_group; u32 trunc_size; u32 rate; @@ -303,7 +324,6 @@ struct mlxsw_sp_port { struct mlxsw_sp_port_xstats xstats; struct delayed_work update_dw; } periodic_hw_stats; - struct mlxsw_sp_port_sample __rcu *sample; struct list_head vlans_list; struct mlxsw_sp_port_vlan *default_vlan; struct mlxsw_sp_qdisc_state *qdisc; @@ -546,6 +566,17 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_hdroom *hdroom); int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port, const struct mlxsw_sp_hdroom *hdroom); +struct mlxsw_sp_sample_params * +mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger); +int +mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger, + const struct mlxsw_sp_sample_params *params, + struct netlink_ext_ack *extack); +void +mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger); extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals; extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals; @@ -583,8 +614,6 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, u8 local_port, void *priv); void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, u8 local_port); -void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, @@ -601,6 +630,8 @@ int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, bool learn_enable); int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type); +int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 ethtype); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u16 ethtype); struct mlxsw_sp_port_vlan * @@ -939,6 +970,12 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, u16 fid, struct netlink_ext_ack *extack); +int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_flow_block *block, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, + struct netlink_ext_ack *extack); struct mlxsw_sp_acl_rule; @@ -1048,6 +1085,19 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; /* spectrum_matchall.c */ +struct mlxsw_sp_mall_ops { + int (*sample_add)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack); + void (*sample_del)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry); +}; + +extern const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops; +extern const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops; + enum mlxsw_sp_mall_action_type { MLXSW_SP_MALL_ACTION_TYPE_MIRROR, MLXSW_SP_MALL_ACTION_TYPE_SAMPLE, @@ -1063,6 +1113,11 @@ struct mlxsw_sp_mall_trap_entry { int span_id; }; +struct mlxsw_sp_mall_sample_entry { + struct mlxsw_sp_sample_params params; + int span_id; /* Relevant for 
Spectrum-2 onwards. */ +}; + struct mlxsw_sp_mall_entry { struct list_head list; unsigned long cookie; @@ -1072,7 +1127,7 @@ struct mlxsw_sp_mall_entry { union { struct mlxsw_sp_mall_mirror_entry mirror; struct mlxsw_sp_mall_trap_entry trap; - struct mlxsw_sp_port_sample sample; + struct mlxsw_sp_mall_sample_entry sample; }; struct rcu_head rcu; }; @@ -1083,7 +1138,8 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block, struct tc_cls_matchall_offload *f); int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block, - struct mlxsw_sp_port *mlxsw_sp_port); + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack); void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block, struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 8cfa03a75374..67cedfa76f78 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -688,6 +688,31 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack); } +int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_flow_block *block, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_flow_block_binding *binding; + struct mlxsw_sp_port *mlxsw_sp_port; + + if (!list_is_singular(&block->binding_list)) { + NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed"); + return -EOPNOTSUPP; + } + binding = list_first_entry(&block->binding_list, + struct mlxsw_sp_flow_block_binding, list); + mlxsw_sp_port = binding->mlxsw_sp_port; + + return mlxsw_afa_block_append_sampler(rulei->act_block, + mlxsw_sp_port->local_port, + psample_group, rate, trunc_size, + truncate, binding->ingress, + extack); +} + struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 90372d1c28d4..c72aa38424dc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -192,6 +192,22 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index) policer_index); } +static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, int *p_span_id, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "Sampling action is not supported on Spectrum-1"); + return -EOPNOTSUPP; +} + +static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id, + bool ingress) +{ + WARN_ON_ONCE(1); +} + const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, @@ -204,8 +220,73 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .mirror_del = mlxsw_sp_act_mirror_del, .policer_add = mlxsw_sp_act_policer_add, .policer_del = mlxsw_sp_act_policer_del, + .sampler_add = mlxsw_sp1_act_sampler_add, + .sampler_del = mlxsw_sp1_act_sampler_del, }; +static int 
mlxsw_sp2_act_sampler_add(void *priv, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, int *p_span_id, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_span_agent_parms agent_parms = { + .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING, + }; + struct mlxsw_sp_sample_trigger trigger = { + .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, + }; + struct mlxsw_sp_sample_params params; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = priv; + int err; + + params.psample_group = psample_group; + params.trunc_size = trunc_size; + params.rate = rate; + params.truncate = truncate; + err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, ¶ms, + extack); + if (err) + return err; + + err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to get SPAN agent"); + goto err_span_agent_get; + } + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to get analyzed port"); + goto err_analyzed_port_get; + } + + return 0; + +err_analyzed_port_get: + mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id); +err_span_agent_get: + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); + return err; +} + +static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id, + bool ingress) +{ + struct mlxsw_sp_sample_trigger trigger = { + .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, + }; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress); + mlxsw_sp_span_agent_put(mlxsw_sp, span_id); + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); +} + const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, @@ -218,6 +299,8 @@ const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { .mirror_del = mlxsw_sp_act_mirror_del, .policer_add = mlxsw_sp_act_policer_add, .policer_del = mlxsw_sp_act_policer_del, + .sampler_add = mlxsw_sp2_act_sampler_add, + .sampler_del = mlxsw_sp2_act_sampler_del, .dummy_first_set = true, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index ed81d4fa48ac..1a2fef2a5379 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -912,9 +912,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp) u64 size = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) - if (mlxsw_sp_nexthop_offload(nh) && - !mlxsw_sp_nexthop_group_has_ipip(nh) && - !mlxsw_sp_nexthop_is_discard(nh)) + if (mlxsw_sp_nexthop_is_forward(nh) && + !mlxsw_sp_nexthop_group_has_ipip(nh)) size++; return size; } @@ -1105,9 +1104,8 @@ start_again: nh_skip = nh_count; nh_count = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { - if (!mlxsw_sp_nexthop_offload(nh) || - mlxsw_sp_nexthop_group_has_ipip(nh) || - mlxsw_sp_nexthop_is_discard(nh)) + if (!mlxsw_sp_nexthop_is_forward(nh) || + mlxsw_sp_nexthop_group_has_ipip(nh)) continue; if (nh_count < nh_skip) @@ -1180,6 +1178,7 @@ out: static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) { + char ratr_pl[MLXSW_REG_RATR_LEN]; struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_nexthop *nh; u32 adj_hash_index = 0; @@ -1187,9 +1186,8 @@ 
static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) u32 adj_size = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { - if (!mlxsw_sp_nexthop_offload(nh) || - mlxsw_sp_nexthop_group_has_ipip(nh) || - mlxsw_sp_nexthop_is_discard(nh)) + if (!mlxsw_sp_nexthop_is_forward(nh) || + mlxsw_sp_nexthop_group_has_ipip(nh)) continue; mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, @@ -1198,8 +1196,9 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); else mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); - mlxsw_sp_nexthop_update(mlxsw_sp, - adj_index + adj_hash_index, nh); + mlxsw_sp_nexthop_eth_update(mlxsw_sp, + adj_index + adj_hash_index, nh, + true, ratr_pl); } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 078601d31cde..c8061beed6db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1059,6 +1059,131 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); } +static void +mlxsw_sp_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, + 0, ppcnt_pl)) + return; + + phy_stats->SymbolErrorDuringCarrier = + mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get(ppcnt_pl); +} + +static void +mlxsw_sp_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, + 0, ppcnt_pl)) + return; + + mac_stats->FramesTransmittedOK = + mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); + mac_stats->FramesReceivedOK = + mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); + mac_stats->FrameCheckSequenceErrors = + mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); + mac_stats->AlignmentErrors = + mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); + mac_stats->OctetsTransmittedOK = + mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); + mac_stats->OctetsReceivedOK = + mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); + mac_stats->MulticastFramesXmittedOK = + mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get(ppcnt_pl); + mac_stats->BroadcastFramesXmittedOK = + mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get(ppcnt_pl); + mac_stats->MulticastFramesReceivedOK = + mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); + mac_stats->BroadcastFramesReceivedOK = + mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get(ppcnt_pl); + mac_stats->InRangeLengthErrors = + mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl); + mac_stats->OutOfRangeLengthField = + mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl); + mac_stats->FrameTooLongErrors = + mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl); +} + +static void +mlxsw_sp_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, + 0, ppcnt_pl)) + return; + + ctrl_stats->MACControlFramesTransmitted = + mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get(ppcnt_pl); + ctrl_stats->MACControlFramesReceived = + mlxsw_reg_ppcnt_a_mac_control_frames_received_get(ppcnt_pl); + 
ctrl_stats->UnsupportedOpcodesReceived = + mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get(ppcnt_pl); +} + +static const struct ethtool_rmon_hist_range mlxsw_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 8191 }, + { 8192, 10239 }, + {} +}; + +static void +mlxsw_sp_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, + 0, ppcnt_pl)) + return; + + rmon->undersize_pkts = + mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get(ppcnt_pl); + rmon->oversize_pkts = + mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get(ppcnt_pl); + rmon->fragments = + mlxsw_reg_ppcnt_ether_stats_fragments_get(ppcnt_pl); + + rmon->hist[0] = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get(ppcnt_pl); + rmon->hist[1] = + mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get(ppcnt_pl); + rmon->hist[2] = + mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get(ppcnt_pl); + rmon->hist[3] = + mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get(ppcnt_pl); + rmon->hist[4] = + mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get(ppcnt_pl); + rmon->hist[5] = + mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get(ppcnt_pl); + rmon->hist[6] = + mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get(ppcnt_pl); + rmon->hist[7] = + mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get(ppcnt_pl); + rmon->hist[8] = + mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get(ppcnt_pl); + rmon->hist[9] = + mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get(ppcnt_pl); + + *ranges = mlxsw_rmon_ranges; +} + const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .cap_link_lanes_supported = true, .get_drvinfo = mlxsw_sp_port_get_drvinfo, @@ -1075,6 +1200,10 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_module_info = mlxsw_sp_get_module_info, .get_module_eeprom = mlxsw_sp_get_module_eeprom, .get_ts_info = mlxsw_sp_get_ts_info, + .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats, + .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, + .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats, + .get_rmon_stats = mlxsw_sp_get_rmon_stats, }; struct mlxsw_sp1_port_link_mode { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c index 0456cda33808..9e50c823a354 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c @@ -71,7 +71,7 @@ static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } - err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port); + err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port, extack); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 41855e58564b..be3791ca6069 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -24,6 +24,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, const struct flow_action_entry *act; int mirror_act_count = 0; int police_act_count = 0; + int sample_act_count = 0; int err, i; if (!flow_action_has_entries(flow_action)) @@ -190,6 +191,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } + if (act->police.rate_pkt_ps) { + 
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + /* The kernel might adjust the requested burst size so * that it is not exactly a power of two. Re-adjust it * here since the hardware only supports burst sizes @@ -204,6 +210,23 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return err; break; } + case FLOW_ACTION_SAMPLE: { + if (sample_act_count++) { + NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei, + block, + act->sample.psample_group, + act->sample.rate, + act->sample.trunc_size, + act->sample.truncate, + extack); + if (err) + return err; + break; + } default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 64a8f838eb53..5facabd86882 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -127,14 +127,16 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_ipip_entry *ipip_entry) + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); __be32 daddr4 = mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev); - char ratr_pl[MLXSW_REG_RATR_LEN]; + enum mlxsw_reg_ratr_op op; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, - true, MLXSW_REG_RATR_TYPE_IPIP, + op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP, adj_index, rif_index); mlxsw_reg_ratr_ipip4_entry_pack(ratr_pl, be32_to_cpu(daddr4)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 87bef9880e5e..f0837b42d1d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -40,7 +40,8 @@ struct mlxsw_sp_ipip_ops { enum mlxsw_sp_l3proto ul_proto; /* Underlay. 
*/ int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_ipip_entry *ipip_entry); + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl); bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c index f30599ad6019..07b371cd9818 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c @@ -24,7 +24,8 @@ mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie static int mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mall_entry *mall_entry) + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_span_agent_parms agent_parms = {}; @@ -33,28 +34,35 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, int err; if (!mall_entry->mirror.to_dev) { - netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); + NL_SET_ERR_MSG(extack, "Could not find requested device"); return -EINVAL; } agent_parms.to_dev = mall_entry->mirror.to_dev; err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id, &agent_parms); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get SPAN agent"); return err; + } err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, mall_entry->ingress); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get analyzed port"); goto err_analyzed_port_get; + } trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS : MLXSW_SP_SPAN_TRIGGER_EGRESS; parms.span_id = mall_entry->mirror.span_id; + parms.probability_rate = 1; err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port, &parms); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent"); goto err_agent_bind; + } return 0; @@ -93,46 +101,64 @@ static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mall_entry *mall_entry) + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_sample_trigger trigger; int err; - if (rtnl_dereference(mlxsw_sp_port->sample)) { - netdev_err(mlxsw_sp_port->dev, "sample already active\n"); - return -EEXIST; - } - rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample); + if (mall_entry->ingress) + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS; + else + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS; + trigger.local_port = mlxsw_sp_port->local_port; + err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, + &mall_entry->sample.params, + extack); + if (err) + return err; - err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, - mall_entry->sample.rate); + err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port, + mall_entry, extack); if (err) goto err_port_sample_set; return 0; err_port_sample_set: - RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL); + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); return err; } static void -mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port) +mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry) { - if (!mlxsw_sp_port->sample) - return; + struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_sample_trigger trigger; - mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1); - RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL); + if (mall_entry->ingress) + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS; + else + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS; + trigger.local_port = mlxsw_sp_port->local_port; + + mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry); + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); } static int mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mall_entry *mall_entry) + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) { switch (mall_entry->type) { case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: - return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry); + return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry, + extack); case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE: - return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry); + return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry, + extack); default: WARN_ON(1); return -EINVAL; @@ -148,7 +174,7 @@ mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry); break; case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE: - mlxsw_sp_mall_port_sample_del(mlxsw_sp_port); + mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry); break; default: WARN_ON(1); @@ -212,6 +238,11 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, flower_prio_valid = true; } + if (protocol != htons(ETH_P_ALL)) { + NL_SET_ERR_MSG(f->common.extack, "matchall rules only supported with 'all' protocol"); + return -EOPNOTSUPP; + } + mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL); if (!mall_entry) return -ENOMEM; @@ -219,54 +250,41 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, mall_entry->priority = f->common.prio; mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block); + if (flower_prio_valid && mall_entry->ingress && + mall_entry->priority >= flower_min_prio) { + NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules"); + err = -EOPNOTSUPP; + goto errout; + } + if (flower_prio_valid && !mall_entry->ingress && + mall_entry->priority <= flower_max_prio) { + NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules"); + err = -EOPNOTSUPP; + goto errout; + } + act = &f->rule->action.entries[0]; - if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { - if (flower_prio_valid && mall_entry->ingress && - mall_entry->priority >= flower_min_prio) { - NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules"); - err = -EOPNOTSUPP; - goto errout; - } - if (flower_prio_valid && !mall_entry->ingress && - mall_entry->priority <= flower_max_prio) { - NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules"); - err = -EOPNOTSUPP; - goto errout; - } + switch (act->id) { + case FLOW_ACTION_MIRRED: mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR; mall_entry->mirror.to_dev = act->dev; - } else if (act->id == FLOW_ACTION_SAMPLE && - protocol == htons(ETH_P_ALL)) { - if (!mall_entry->ingress) { - NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress"); - err = -EOPNOTSUPP; - goto errout; - } - if (flower_prio_valid && - mall_entry->priority >= flower_min_prio) { - NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules"); - err = -EOPNOTSUPP; - goto errout; - } - if 
(act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { - NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported"); - err = -EOPNOTSUPP; - goto errout; - } + break; + case FLOW_ACTION_SAMPLE: mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE; - mall_entry->sample.psample_group = act->sample.psample_group; - mall_entry->sample.truncate = act->sample.truncate; - mall_entry->sample.trunc_size = act->sample.trunc_size; - mall_entry->sample.rate = act->sample.rate; - } else { + mall_entry->sample.params.psample_group = act->sample.psample_group; + mall_entry->sample.params.truncate = act->sample.truncate; + mall_entry->sample.params.trunc_size = act->sample.trunc_size; + mall_entry->sample.params.rate = act->sample.rate; + break; + default: err = -EOPNOTSUPP; goto errout; } list_for_each_entry(binding, &block->binding_list, list) { err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port, - mall_entry); + mall_entry, f->common.extack); if (err) goto rollback; } @@ -314,13 +332,15 @@ void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block, } int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block, - struct mlxsw_sp_port *mlxsw_sp_port) + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp_mall_entry *mall_entry; int err; list_for_each_entry(mall_entry, &block->mall.list, list) { - err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry); + err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry, + extack); if (err) goto rollback; } @@ -355,3 +375,104 @@ int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index, *p_max_prio = block->mall.max_prio; return 0; } + +static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) +{ + u32 rate = mall_entry->sample.params.rate; + + if (!mall_entry->ingress) { + NL_SET_ERR_MSG(extack, "Sampling is not supported on egress"); + return -EOPNOTSUPP; + } + + if (rate > MLXSW_REG_MPSC_RATE_MAX) { + NL_SET_ERR_MSG(extack, "Unsupported sampling rate"); + return -EOPNOTSUPP; + } + + return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate); +} + +static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry) +{ + mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1); +} + +const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = { + .sample_add = mlxsw_sp1_mall_sample_add, + .sample_del = mlxsw_sp1_mall_sample_del, +}; + +static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + struct mlxsw_sp_span_agent_parms agent_parms = { + .to_dev = NULL, /* Mirror to CPU. */ + .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING, + }; + u32 rate = mall_entry->sample.params.rate; + enum mlxsw_sp_span_trigger span_trigger; + int err; + + err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id, + &agent_parms); + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get SPAN agent"); + return err; + } + + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, + mall_entry->ingress); + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get analyzed port"); + goto err_analyzed_port_get; + } + + span_trigger = mall_entry->ingress ? 
MLXSW_SP_SPAN_TRIGGER_INGRESS : + MLXSW_SP_SPAN_TRIGGER_EGRESS; + trigger_parms.span_id = mall_entry->sample.span_id; + trigger_parms.probability_rate = rate; + err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port, + &trigger_parms); + if (err) { + NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent"); + goto err_agent_bind; + } + + return 0; + +err_agent_bind: + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress); +err_analyzed_port_get: + mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id); + return err; +} + +static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry) +{ + struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + enum mlxsw_sp_span_trigger span_trigger; + + span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS : + MLXSW_SP_SPAN_TRIGGER_EGRESS; + trigger_parms.span_id = mall_entry->sample.span_id; + mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port, + &trigger_parms); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress); + mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id); +} + +const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = { + .sample_add = mlxsw_sp2_mall_sample_add, + .sample_del = mlxsw_sp2_mall_sample_del, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 2796d3659979..d8104fc6c900 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -18,7 +18,6 @@ struct mlxsw_sp_nve_config { u32 ul_tb_id; enum mlxsw_sp_l3proto ul_proto; union mlxsw_sp_l3addr ul_sip; - u16 ethertype; }; struct mlxsw_sp_nve { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index 3e2bb22e9ca6..b84bb4b65098 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -113,7 +113,6 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; config->udp_dport = cfg->dst_port; - config->ethertype = params->ethertype; } static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, @@ -318,20 +317,14 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype) +mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp) { char spvid_pl[MLXSW_REG_SPVID_LEN] = {}; - u8 sver_type; - int err; mlxsw_reg_spvid_tport_set(spvid_pl, true); mlxsw_reg_spvid_local_port_set(spvid_pl, MLXSW_REG_TUNNEL_PORT_NVE); - err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type); - if (err) - return err; - - mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type); + mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); } @@ -367,7 +360,7 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, if (err) goto err_spvtr_write; - err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype); + err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp); if (err) goto err_decap_ethertype_set; @@ -392,8 +385,6 @@ static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) char spvtr_pl[MLXSW_REG_SPVTR_LEN]; char tngcr_pl[MLXSW_REG_TNGCR_LEN]; - /* Set default EtherType 
*/ - mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q); mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index fd672c6c9133..04672eb5c7f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -29,7 +29,6 @@ struct mlxsw_sp_qdisc; struct mlxsw_sp_qdisc_ops { enum mlxsw_sp_qdisc_type type; int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); @@ -48,11 +47,14 @@ struct mlxsw_sp_qdisc_ops { */ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); + struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u32 parent); + unsigned int num_classes; }; struct mlxsw_sp_qdisc { u32 handle; - u8 tclass_num; + int tclass_num; u8 prio_bitmap; union { struct red_stats red; @@ -66,11 +68,13 @@ struct mlxsw_sp_qdisc { } stats_base; struct mlxsw_sp_qdisc_ops *ops; + struct mlxsw_sp_qdisc *parent; + struct mlxsw_sp_qdisc *qdiscs; + unsigned int num_classes; }; struct mlxsw_sp_qdisc_state { struct mlxsw_sp_qdisc root_qdisc; - struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS]; /* When a PRIO or ETS are added, the invisible FIFOs in their bands are * created first. When notifications for these FIFOs arrive, it is not @@ -85,15 +89,55 @@ struct mlxsw_sp_qdisc_state { */ u32 future_handle; bool future_fifos[IEEE_8021QAZ_MAX_TCS]; + struct mutex lock; /* Protects qdisc state. 
*/ }; static bool -mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle, - enum mlxsw_sp_qdisc_type type) +mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle) +{ + return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle; +} + +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc, + struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *, + void *), + void *data) +{ + struct mlxsw_sp_qdisc *tmp; + unsigned int i; + + if (pre) { + tmp = pre(qdisc, data); + if (tmp) + return tmp; + } + + if (qdisc->ops) { + for (i = 0; i < qdisc->num_classes; i++) { + tmp = &qdisc->qdiscs[i]; + if (qdisc->ops) { + tmp = mlxsw_sp_qdisc_walk(tmp, pre, data); + if (tmp) + return tmp; + } + } + } + + return NULL; +} + +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data) { - return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && - mlxsw_sp_qdisc->ops->type == type && - mlxsw_sp_qdisc->handle == handle; + u32 parent = *(u32 *)data; + + if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) { + if (qdisc->ops->find_class) + return qdisc->ops->find_class(qdisc, parent); + } + + return NULL; } static struct mlxsw_sp_qdisc * @@ -101,39 +145,46 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent, bool root_only) { struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; - int tclass, child_index; + if (!qdisc_state) + return NULL; if (parent == TC_H_ROOT) return &qdisc_state->root_qdisc; - - if (root_only || !qdisc_state || - !qdisc_state->root_qdisc.ops || - TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle || - TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS) + if (root_only) return NULL; + return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc, + mlxsw_sp_qdisc_walk_cb_find, &parent); +} - child_index = TC_H_MIN(parent); - tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index); - return &qdisc_state->tclass_qdiscs[tclass]; +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data) +{ + u32 handle = *(u32 *)data; + + if (qdisc->ops && qdisc->handle == handle) + return qdisc; + return NULL; } static struct mlxsw_sp_qdisc * mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle) { struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; - int i; - - if (qdisc_state->root_qdisc.handle == handle) - return &qdisc_state->root_qdisc; - if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC) + if (!qdisc_state) return NULL; + return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc, + mlxsw_sp_qdisc_walk_cb_find_by_handle, + &handle); +} - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - if (qdisc_state->tclass_qdiscs[i].handle == handle) - return &qdisc_state->tclass_qdiscs[i]; +static void +mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + struct mlxsw_sp_qdisc *tmp; - return NULL; + for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent) + tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog; } static int @@ -157,32 +208,48 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom); } - if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy) + if (!mlxsw_sp_qdisc->ops) + return 0; + + mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc); + if (mlxsw_sp_qdisc->ops->destroy) err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + if (mlxsw_sp_qdisc->ops->clean_stats) + 
mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc); mlxsw_sp_qdisc->handle = TC_H_UNSPEC; mlxsw_sp_qdisc->ops = NULL; - + mlxsw_sp_qdisc->num_classes = 0; + kfree(mlxsw_sp_qdisc->qdiscs); + mlxsw_sp_qdisc->qdiscs = NULL; return err_hdroom ?: err; } -static int -mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - struct mlxsw_sp_qdisc_ops *ops, void *params) +static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_ops *ops, void *params) { struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc; struct mlxsw_sp_hdroom orig_hdroom; + unsigned int i; int err; - if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type) - /* In case this location contained a different qdisc of the - * same type we can override the old qdisc configuration. - * Otherwise, we need to remove the old qdisc before setting the - * new one. - */ - mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + err = ops->check_params(mlxsw_sp_port, params); + if (err) + return err; + + if (ops->num_classes) { + mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes, + sizeof(*mlxsw_sp_qdisc->qdiscs), + GFP_KERNEL); + if (!mlxsw_sp_qdisc->qdiscs) + return -ENOMEM; + + for (i = 0; i < ops->num_classes; i++) + mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc; + } orig_hdroom = *mlxsw_sp_port->hdroom; if (root_qdisc == mlxsw_sp_qdisc) { @@ -198,20 +265,46 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, goto err_hdroom_configure; } - err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params); + mlxsw_sp_qdisc->num_classes = ops->num_classes; + mlxsw_sp_qdisc->ops = ops; + mlxsw_sp_qdisc->handle = handle; + err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params); if (err) - goto err_bad_param; + goto err_replace; + + return 0; + +err_replace: + mlxsw_sp_qdisc->handle = TC_H_UNSPEC; + mlxsw_sp_qdisc->ops = NULL; + mlxsw_sp_qdisc->num_classes = 0; + mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom); +err_hdroom_configure: + kfree(mlxsw_sp_qdisc->qdiscs); + mlxsw_sp_qdisc->qdiscs = NULL; + return err; +} + +static int +mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) +{ + struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops; + int err; + + err = ops->check_params(mlxsw_sp_port, params); + if (err) + goto unoffload; err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params); if (err) - goto err_config; + goto unoffload; /* Check if the Qdisc changed. That includes a situation where an * invisible Qdisc replaces another one, or is being added for the * first time. 
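 * When it did, the cached statistics base is re-synced via the clean_stats() callback below.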
*/ - if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) { - mlxsw_sp_qdisc->ops = ops; + if (mlxsw_sp_qdisc->handle != handle) { if (ops->clean_stats) ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc); } @@ -219,11 +312,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, mlxsw_sp_qdisc->handle = handle; return 0; -err_bad_param: -err_config: - mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom); -err_hdroom_configure: - if (mlxsw_sp_qdisc->handle == handle && ops->unoffload) +unoffload: + if (ops->unoffload) ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params); mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); @@ -231,6 +321,27 @@ err_hdroom_configure: } static int +mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_ops *ops, void *params) +{ + if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type) + /* In case this location contained a different qdisc of the + * same type we can override the old qdisc configuration. + * Otherwise, we need to remove the old qdisc before setting the + * new one. + */ + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + + if (!mlxsw_sp_qdisc->ops) + return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle, + mlxsw_sp_qdisc, ops, params); + else + return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle, + mlxsw_sp_qdisc, params); +} + +static int mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, struct tc_qopt_offload_stats *stats_ptr) @@ -295,7 +406,7 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port, u64 *p_tx_bytes, u64 *p_tx_packets, u64 *p_drops, u64 *p_backlog) { - u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; u64 tx_bytes, tx_packets; @@ -395,7 +506,7 @@ static void mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; struct red_stats *red_base; @@ -421,20 +532,12 @@ static int mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; - struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc; - - if (root_qdisc != mlxsw_sp_qdisc) - root_qdisc->stats_base.backlog -= - mlxsw_sp_qdisc->stats_base.backlog; - return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, mlxsw_sp_qdisc->tclass_num); } static int mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -467,7 +570,7 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct tc_red_qopt_offload_params *p = params; - u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + int tclass_num = mlxsw_sp_qdisc->tclass_num; u32 min, max; u64 prob; @@ -512,7 +615,7 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, void *xstats_ptr) { struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red; - u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; struct red_stats *res = xstats_ptr; 
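The qdisc state is now organized as a small tree rather than a fixed per-tclass array: each offloaded qdisc carries a dynamically allocated qdiscs[] array of children, and lookups by handle or by parent recurse from the root through the walk/find_class helpers added above. A minimal sketch of that depth-first lookup, using simplified stand-in types rather than the driver's structures:

#include <stddef.h>

/* Illustrative stand-ins for the driver's qdisc tree; not the mlxsw types. */
struct demo_qdisc {
	unsigned int handle;		/* qdisc identifier */
	unsigned int num_classes;	/* number of children, 0 for a leaf */
	struct demo_qdisc *children;	/* array of num_classes children */
};

/* Depth-first search mirroring the walk-plus-callback pattern above:
 * visit the node first, then recurse into each child band.
 */
static struct demo_qdisc *
demo_qdisc_find(struct demo_qdisc *qdisc, unsigned int handle)
{
	unsigned int i;

	if (qdisc->handle == handle)
		return qdisc;

	for (i = 0; i < qdisc->num_classes; i++) {
		struct demo_qdisc *found;

		found = demo_qdisc_find(&qdisc->children[i], handle);
		if (found)
			return found;
	}

	return NULL;
}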
int early_drops, pdrops; @@ -536,7 +639,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, struct tc_qopt_offload_stats *stats_ptr) { - u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; u64 overlimits; @@ -553,6 +656,13 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u32 parent) +{ + return NULL; +} + #define MLXSW_SP_PORT_DEFAULT_TCLASS 0 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = { @@ -564,10 +674,11 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = { .get_stats = mlxsw_sp_qdisc_get_red_stats, .get_xstats = mlxsw_sp_qdisc_get_red_xstats, .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats, + .find_class = mlxsw_sp_qdisc_leaf_find_class, }; -int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_red_qopt_offload *p) +static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_red_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; @@ -581,8 +692,7 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_qdisc_ops_red, &p->set); - if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, - MLXSW_SP_QDISC_RED)) + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle)) return -EOPNOTSUPP; switch (p->command) { @@ -599,6 +709,18 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, } } +int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_red_qopt_offload *p) +{ + int err; + + mutex_lock(&mlxsw_sp_port->qdisc->lock); + err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p); + mutex_unlock(&mlxsw_sp_port->qdisc->lock); + + return err; +} + static void mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) @@ -622,13 +744,6 @@ static int mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; - struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc; - - if (root_qdisc != mlxsw_sp_qdisc) - root_qdisc->stats_base.backlog -= - mlxsw_sp_qdisc->stats_base.backlog; - return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, MLXSW_REG_QEEC_HR_SUBGROUP, mlxsw_sp_qdisc->tclass_num, 0, @@ -678,7 +793,6 @@ mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p) static int mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { struct tc_tbf_qopt_offload_replace_params *p = params; @@ -766,10 +880,11 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = { .destroy = mlxsw_sp_qdisc_tbf_destroy, .get_stats = mlxsw_sp_qdisc_get_tbf_stats, .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats, + .find_class = mlxsw_sp_qdisc_leaf_find_class, }; -int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_tbf_qopt_offload *p) +static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_tbf_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; @@ -783,8 +898,7 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_qdisc_ops_tbf, &p->replace_params); - if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, - MLXSW_SP_QDISC_TBF)) + if 
(!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle)) return -EOPNOTSUPP; switch (p->command) { @@ -798,22 +912,20 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, } } -static int -mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_tbf_qopt_offload *p) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; - struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc; + int err; - if (root_qdisc != mlxsw_sp_qdisc) - root_qdisc->stats_base.backlog -= - mlxsw_sp_qdisc->stats_base.backlog; - return 0; + mutex_lock(&mlxsw_sp_port->qdisc->lock); + err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p); + mutex_unlock(&mlxsw_sp_port->qdisc->lock); + + return err; } static int mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { return 0; @@ -841,25 +953,18 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = { .type = MLXSW_SP_QDISC_FIFO, .check_params = mlxsw_sp_qdisc_fifo_check_params, .replace = mlxsw_sp_qdisc_fifo_replace, - .destroy = mlxsw_sp_qdisc_fifo_destroy, .get_stats = mlxsw_sp_qdisc_get_fifo_stats, .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats, }; -int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_fifo_qopt_offload *p) +static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_fifo_qopt_offload *p) { struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - int tclass, child_index; + unsigned int band; u32 parent_handle; - /* Invisible FIFOs are tracked in future_handle and future_fifos. Make - * sure that not more than one qdisc is created for a port at a time. - * RTNL is a simple proxy for that. 
- */ - ASSERT_RTNL(); - mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false); if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) { parent_handle = TC_H_MAJ(p->parent); @@ -872,13 +977,12 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, qdisc_state->future_handle = parent_handle; } - child_index = TC_H_MIN(p->parent); - tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index); - if (tclass < IEEE_8021QAZ_MAX_TCS) { + band = TC_H_MIN(p->parent) - 1; + if (band < IEEE_8021QAZ_MAX_TCS) { if (p->command == TC_FIFO_REPLACE) - qdisc_state->future_fifos[tclass] = true; + qdisc_state->future_fifos[band] = true; else if (p->command == TC_FIFO_DESTROY) - qdisc_state->future_fifos[tclass] = false; + qdisc_state->future_fifos[band] = false; } } if (!mlxsw_sp_qdisc) @@ -890,16 +994,12 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_qdisc_ops_fifo, NULL); } - if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, - MLXSW_SP_QDISC_FIFO)) + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle)) return -EOPNOTSUPP; switch (p->command) { case TC_FIFO_DESTROY: - if (p->handle == mlxsw_sp_qdisc->handle) - return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, - mlxsw_sp_qdisc); - return 0; + return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); case TC_FIFO_STATS: return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); @@ -910,21 +1010,32 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } -static int -__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port) +int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_fifo_qopt_offload *p) +{ + int err; + + mutex_lock(&mlxsw_sp_port->qdisc->lock); + err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p); + mutex_unlock(&mlxsw_sp_port->qdisc->lock); + + return err; +} + +static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; int i; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) { mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, MLXSW_SP_PORT_DEFAULT_TCLASS); mlxsw_sp_port_ets_set(mlxsw_sp_port, MLXSW_REG_QEEC_HR_SUBGROUP, i, 0, false, 0); mlxsw_sp_qdisc_destroy(mlxsw_sp_port, - &qdisc_state->tclass_qdiscs[i]); - qdisc_state->tclass_qdiscs[i].prio_bitmap = 0; + &mlxsw_sp_qdisc->qdiscs[i]); + mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0; } return 0; @@ -934,7 +1045,7 @@ static int mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port); + return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); } static int @@ -948,7 +1059,6 @@ __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands) static int mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { struct tc_prio_qopt_offload_params *p = params; @@ -957,8 +1067,9 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port, } static int -__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, - unsigned int nbands, +__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u32 handle, unsigned int nbands, const unsigned int *quanta, const unsigned int *weights, const u8 *priomap) @@ -971,7 +1082,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port 
*mlxsw_sp_port, u32 handle, for (band = 0; band < nbands; band++) { tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); - child_qdisc = &qdisc_state->tclass_qdiscs[tclass]; + child_qdisc = &mlxsw_sp_qdisc->qdiscs[band]; old_priomap = child_qdisc->prio_bitmap; child_qdisc->prio_bitmap = 0; @@ -993,6 +1104,9 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, return err; } } + + child_qdisc->tclass_num = tclass; + if (old_priomap != child_qdisc->prio_bitmap && child_qdisc->ops && child_qdisc->ops->clean_stats) { backlog = child_qdisc->stats_base.backlog; @@ -1002,7 +1116,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, } if (handle == qdisc_state->future_handle && - qdisc_state->future_fifos[tclass]) { + qdisc_state->future_fifos[band]) { err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC, child_qdisc, &mlxsw_sp_qdisc_ops_fifo, @@ -1013,7 +1127,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, } for (; band < IEEE_8021QAZ_MAX_TCS; band++) { tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); - child_qdisc = &qdisc_state->tclass_qdiscs[tclass]; + child_qdisc = &mlxsw_sp_qdisc->qdiscs[band]; child_qdisc->prio_bitmap = 0; mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc); mlxsw_sp_port_ets_set(mlxsw_sp_port, @@ -1034,8 +1148,9 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, struct tc_prio_qopt_offload_params *p = params; unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0}; - return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands, - zeroes, zeroes, p->priomap); + return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc, + handle, p->bands, zeroes, + zeroes, p->priomap); } static void @@ -1066,7 +1181,6 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, struct tc_qopt_offload_stats *stats_ptr) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; struct mlxsw_sp_qdisc *tc_qdisc; u64 tx_packets = 0; u64 tx_bytes = 0; @@ -1074,8 +1188,8 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, u64 drops = 0; int i; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - tc_qdisc = &qdisc_state->tclass_qdiscs[i]; + for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) { + tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i]; mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc, &tx_bytes, &tx_packets, &drops, &backlog); @@ -1112,6 +1226,18 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_qdisc->stats_base.backlog = 0; } +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u32 parent) +{ + int child_index = TC_H_MIN(parent); + int band = child_index - 1; + + if (band < 0 || band >= mlxsw_sp_qdisc->num_classes) + return NULL; + return &mlxsw_sp_qdisc->qdiscs[band]; +} + static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { .type = MLXSW_SP_QDISC_PRIO, .check_params = mlxsw_sp_qdisc_prio_check_params, @@ -1120,11 +1246,12 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { .destroy = mlxsw_sp_qdisc_prio_destroy, .get_stats = mlxsw_sp_qdisc_get_prio_stats, .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, + .find_class = mlxsw_sp_qdisc_prio_find_class, + .num_classes = IEEE_8021QAZ_MAX_TCS, }; static int mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { struct tc_ets_qopt_offload_replace_params *p = params; @@ -1139,8 
+1266,9 @@ mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, { struct tc_ets_qopt_offload_replace_params *p = params; - return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands, - p->quanta, p->weights, p->priomap); + return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc, + handle, p->bands, p->quanta, + p->weights, p->priomap); } static void @@ -1158,7 +1286,7 @@ static int mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port); + return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); } static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = { @@ -1169,6 +1297,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = { .destroy = mlxsw_sp_qdisc_ets_destroy, .get_stats = mlxsw_sp_qdisc_get_prio_stats, .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, + .find_class = mlxsw_sp_qdisc_prio_find_class, + .num_classes = IEEE_8021QAZ_MAX_TCS, }; /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting @@ -1201,12 +1331,10 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u8 band, u32 child_handle) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; - int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); struct mlxsw_sp_qdisc *old_qdisc; - if (band < IEEE_8021QAZ_MAX_TCS && - qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle) + if (band < mlxsw_sp_qdisc->num_classes && + mlxsw_sp_qdisc->qdiscs[band].handle == child_handle) return 0; if (!child_handle) { @@ -1224,8 +1352,10 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port, if (old_qdisc) mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc); - mlxsw_sp_qdisc_destroy(mlxsw_sp_port, - &qdisc_state->tclass_qdiscs[tclass_num]); + mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band); + if (!WARN_ON(!mlxsw_sp_qdisc)) + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + return -EOPNOTSUPP; } @@ -1238,8 +1368,8 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, p->band, p->child_handle); } -int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_prio_qopt_offload *p) +static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_prio_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; @@ -1253,8 +1383,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_qdisc_ops_prio, &p->replace_params); - if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, - MLXSW_SP_QDISC_PRIO)) + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle)) return -EOPNOTSUPP; switch (p->command) { @@ -1271,8 +1400,20 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, } } -int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_ets_qopt_offload *p) +int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_prio_qopt_offload *p) +{ + int err; + + mutex_lock(&mlxsw_sp_port->qdisc->lock); + err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p); + mutex_unlock(&mlxsw_sp_port->qdisc->lock); + + return err; +} + +static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_ets_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; @@ -1286,8 +1427,7 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_qdisc_ops_ets, &p->replace_params); - if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, 
- MLXSW_SP_QDISC_ETS)) + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle)) return -EOPNOTSUPP; switch (p->command) { @@ -1305,6 +1445,18 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, } } +int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_ets_qopt_offload *p) +{ + int err; + + mutex_lock(&mlxsw_sp_port->qdisc->lock); + err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p); + mutex_unlock(&mlxsw_sp_port->qdisc->lock); + + return err; +} + struct mlxsw_sp_qevent_block { struct list_head binding_list; struct list_head mall_entry_list; @@ -1341,6 +1493,7 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, goto err_analyzed_port_get; trigger_parms.span_id = span_id; + trigger_parms.probability_rate = 1; err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, &trigger_parms); if (err) @@ -1404,7 +1557,9 @@ static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mall_entry *mall_entry, struct mlxsw_sp_qevent_binding *qevent_binding) { - struct mlxsw_sp_span_agent_parms agent_parms = {}; + struct mlxsw_sp_span_agent_parms agent_parms = { + .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER, + }; int err; err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp, @@ -1831,22 +1986,20 @@ int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_por int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_qdisc_state *qdisc_state; - int i; qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL); if (!qdisc_state) return -ENOMEM; + mutex_init(&qdisc_state->lock); qdisc_state->root_qdisc.prio_bitmap = 0xff; qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - qdisc_state->tclass_qdiscs[i].tclass_num = i; - mlxsw_sp_port->qdisc = qdisc_state; return 0; } void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port) { + mutex_destroy(&mlxsw_sp_port->qdisc->lock); kfree(mlxsw_sp_port->qdisc); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index eda99d82766a..41259c0004d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -113,6 +113,10 @@ struct mlxsw_sp_rif_ops { void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; +struct mlxsw_sp_router_ops { + int (*init)(struct mlxsw_sp *mlxsw_sp); +}; + static struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); @@ -2662,6 +2666,10 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) goto out; } + if (neigh_entry->connected && entry_connected && + !memcmp(neigh_entry->ha, ha, ETH_ALEN)) + goto out; + memcpy(neigh_entry->ha, ha, ETH_ALEN); mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected, @@ -2842,6 +2850,15 @@ enum mlxsw_sp_nexthop_type { MLXSW_SP_NEXTHOP_TYPE_IPIP, }; +enum mlxsw_sp_nexthop_action { + /* Nexthop forwards packets to an egress RIF */ + MLXSW_SP_NEXTHOP_ACTION_FORWARD, + /* Nexthop discards packets */ + MLXSW_SP_NEXTHOP_ACTION_DISCARD, + /* Nexthop traps packets */ + MLXSW_SP_NEXTHOP_ACTION_TRAP, +}; + struct mlxsw_sp_nexthop_key { struct fib_nh *fib_nh; }; @@ -2862,16 +2879,16 @@ struct mlxsw_sp_nexthop { int norm_nh_weight; int num_adj_entries; struct mlxsw_sp_rif *rif; - u8 should_offload:1, /* set 
indicates this neigh is connected and - * should be put to KVD linear area of this group. + u8 should_offload:1, /* set indicates this nexthop should be written + * to the adjacency table. */ - offloaded:1, /* set in case the neigh is actually put into - * KVD linear area of this group. + offloaded:1, /* set indicates this nexthop was written to the + * adjacency table. */ - update:1, /* set indicates that MAC of this neigh should be - * updated in HW + update:1; /* set indicates this nexthop should be updated in the + * adjacency table (f.e., its MAC changed). */ - discard:1; /* nexthop is programmed to discard packets */ + enum mlxsw_sp_nexthop_action action; enum mlxsw_sp_nexthop_type type; union { struct mlxsw_sp_neigh_entry *neigh_entry; @@ -2894,7 +2911,9 @@ struct mlxsw_sp_nexthop_group_info { u16 count; int sum_norm_weight; u8 adj_index_valid:1, - gateway:1; /* routes using the group use a gateway */ + gateway:1, /* routes using the group use a gateway */ + is_resilient:1; + struct list_head list; /* member in nh_res_grp_list */ struct mlxsw_sp_nexthop nexthops[0]; #define nh_rif nexthops[0].rif }; @@ -2979,14 +2998,15 @@ struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, return list_next_entry(nh, router_list_node); } -bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh) +bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh) { - return nh->offloaded; + return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD; } unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) { - if (!nh->offloaded) + if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH || + !mlxsw_sp_nexthop_is_forward(nh)) return NULL; return nh->neigh_entry->ha; } @@ -3036,11 +3056,6 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) return false; } -bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh) -{ - return nh->discard; -} - static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = { .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key), .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node), @@ -3403,20 +3418,38 @@ err_mass_update_vr: return err; } -static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, + struct mlxsw_sp_nexthop *nh, + bool force, char *ratr_pl) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; - char ratr_pl[MLXSW_REG_RATR_LEN]; + enum mlxsw_reg_ratr_op op; + u16 rif_index; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, - true, MLXSW_REG_RATR_TYPE_ETHERNET, - adj_index, nh->rif->rif_index); - if (nh->discard) + rif_index = nh->rif ? nh->rif->rif_index : + mlxsw_sp->router->lb_rif_index; + op = force ? 
MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET, + adj_index, rif_index); + switch (nh->action) { + case MLXSW_SP_NEXTHOP_ACTION_FORWARD: + mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + break; + case MLXSW_SP_NEXTHOP_ACTION_DISCARD: mlxsw_reg_ratr_trap_action_set(ratr_pl, MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS); - else - mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + break; + case MLXSW_SP_NEXTHOP_ACTION_TRAP: + mlxsw_reg_ratr_trap_action_set(ratr_pl, + MLXSW_REG_RATR_TRAP_ACTION_TRAP); + mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } if (nh->counter_valid) mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); else @@ -3425,15 +3458,17 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); } -int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl) { int i; for (i = 0; i < nh->num_adj_entries; i++) { int err; - err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh); + err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i, + nh, force, ratr_pl); if (err) return err; } @@ -3443,17 +3478,20 @@ int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) + struct mlxsw_sp_nexthop *nh, + bool force, char *ratr_pl) { const struct mlxsw_sp_ipip_ops *ipip_ops; ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt]; - return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); + return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry, + force, ratr_pl); } static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl) { int i; @@ -3461,7 +3499,7 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, int err; err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i, - nh); + nh, force, ratr_pl); if (err) return err; } @@ -3469,11 +3507,29 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl) +{ + /* When action is discard or trap, the nexthop must be + * programmed as an Ethernet nexthop. 
+ */ + if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH || + nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD || + nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP) + return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh, + force, ratr_pl); + else + return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh, + force, ratr_pl); +} + static int mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi, bool reallocate) { + char ratr_pl[MLXSW_REG_RATR_LEN]; u32 adj_index = nhgi->adj_index; /* base */ struct mlxsw_sp_nexthop *nh; int i; @@ -3489,16 +3545,8 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, if (nh->update || reallocate) { int err = 0; - switch (nh->type) { - case MLXSW_SP_NEXTHOP_TYPE_ETH: - err = mlxsw_sp_nexthop_update - (mlxsw_sp, adj_index, nh); - break; - case MLXSW_SP_NEXTHOP_TYPE_IPIP: - err = mlxsw_sp_nexthop_ipip_update - (mlxsw_sp, adj_index, nh); - break; - } + err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, + true, ratr_pl); if (err) return err; nh->update = 0; @@ -3524,34 +3572,69 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size) +struct mlxsw_sp_adj_grp_size_range { + u16 start; /* Inclusive */ + u16 end; /* Inclusive */ +}; + +/* Ordered by range start value */ +static const struct mlxsw_sp_adj_grp_size_range +mlxsw_sp1_adj_grp_size_ranges[] = { + { .start = 1, .end = 64 }, + { .start = 512, .end = 512 }, + { .start = 1024, .end = 1024 }, + { .start = 2048, .end = 2048 }, + { .start = 4096, .end = 4096 }, +}; + +/* Ordered by range start value */ +static const struct mlxsw_sp_adj_grp_size_range +mlxsw_sp2_adj_grp_size_ranges[] = { + { .start = 1, .end = 128 }, + { .start = 256, .end = 256 }, + { .start = 512, .end = 512 }, + { .start = 1024, .end = 1024 }, + { .start = 2048, .end = 2048 }, + { .start = 4096, .end = 4096 }, +}; + +static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp, + u16 *p_adj_grp_size) { - /* Valid sizes for an adjacency group are: - * 1-64, 512, 1024, 2048 and 4096. 
- */ - if (*p_adj_grp_size <= 64) - return; - else if (*p_adj_grp_size <= 512) - *p_adj_grp_size = 512; - else if (*p_adj_grp_size <= 1024) - *p_adj_grp_size = 1024; - else if (*p_adj_grp_size <= 2048) - *p_adj_grp_size = 2048; - else - *p_adj_grp_size = 4096; + int i; + + for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) { + const struct mlxsw_sp_adj_grp_size_range *size_range; + + size_range = &mlxsw_sp->router->adj_grp_size_ranges[i]; + + if (*p_adj_grp_size >= size_range->start && + *p_adj_grp_size <= size_range->end) + return; + + if (*p_adj_grp_size <= size_range->end) { + *p_adj_grp_size = size_range->end; + return; + } + } } -static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size, +static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp, + u16 *p_adj_grp_size, unsigned int alloc_size) { - if (alloc_size >= 4096) - *p_adj_grp_size = 4096; - else if (alloc_size >= 2048) - *p_adj_grp_size = 2048; - else if (alloc_size >= 1024) - *p_adj_grp_size = 1024; - else if (alloc_size >= 512) - *p_adj_grp_size = 512; + int i; + + for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) { + const struct mlxsw_sp_adj_grp_size_range *size_range; + + size_range = &mlxsw_sp->router->adj_grp_size_ranges[i]; + + if (alloc_size >= size_range->end) { + *p_adj_grp_size = size_range->end; + return; + } + } } static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, @@ -3563,7 +3646,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, /* Round up the requested group size to the next size supported * by the device and make sure the request can be satisfied. */ - mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); + mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size); err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, *p_adj_grp_size, &alloc_size); @@ -3573,7 +3656,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, * entries than requested. Try to use as much of them as * possible. */ - mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size); + mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size); return 0; } @@ -3681,9 +3764,29 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, } static void +mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nexthop *nh, + u16 bucket_index) +{ + struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp; + bool offload = false, trap = false; + + if (nh->offloaded) { + if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP) + trap = true; + else + offload = true; + } + nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, + bucket_index, offload, trap); +} + +static void mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + int i; + /* Do not update the flags if the nexthop group is being destroyed * since: * 1. The nexthop objects is being deleted, in which case the flags are @@ -3697,6 +3800,18 @@ mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, nh_grp->nhgi->adj_index_valid, false); + + /* Update flags of individual nexthop buckets in case of a resilient + * nexthop group. 
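+ * A bucket backed by a nexthop that currently traps packets is reported as trapping rather than offloaded.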
+ */ + if (!nh_grp->nhgi->is_resilient) + return; + + for (i = 0; i < nh_grp->nhgi->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i]; + + mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i); + } } static void @@ -3750,6 +3865,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; } + /* Flags of individual nexthop buckets might need to be + * updated. + */ + mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp); return 0; } mlxsw_sp_nexthop_group_normalize(nhgi); @@ -3832,10 +3951,15 @@ set_trap: static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, bool removing) { - if (!removing) + if (!removing) { + nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD; nh->should_offload = 1; - else + } else if (nh->nhgi->is_resilient) { + nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP; + nh->should_offload = 1; + } else { nh->should_offload = 0; + } nh->update = 1; } @@ -4250,6 +4374,85 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } } +static void +mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nexthop_group *nh_grp, + unsigned long *activity) +{ + char *ratrad_pl; + int i, err; + + ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL); + if (!ratrad_pl) + return; + + mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index, + nh_grp->nhgi->count); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl); + if (err) + goto out; + + for (i = 0; i < nh_grp->nhgi->count; i++) { + if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i)) + continue; + bitmap_set(activity, i, 1); + } + +out: + kfree(ratrad_pl); +} + +#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */ + +static void +mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nexthop_group *nh_grp) +{ + unsigned long *activity; + + activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL); + if (!activity) + return; + + mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity); + nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, + nh_grp->nhgi->count, activity); + + bitmap_free(activity); +} + +static void +mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp) +{ + unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL; + + mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw, + msecs_to_jiffies(interval)); +} + +static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work) +{ + struct mlxsw_sp_nexthop_group_info *nhgi; + struct mlxsw_sp_router *router; + bool reschedule = false; + + router = container_of(work, struct mlxsw_sp_router, + nh_grp_activity_dw.work); + + mutex_lock(&router->lock); + + list_for_each_entry(nhgi, &router->nh_res_grp_list, list) { + mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp); + reschedule = true; + } + + mutex_unlock(&router->lock); + + if (!reschedule) + return; + mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp); +} + static int mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp, const struct nh_notifier_single_info *nh, @@ -4268,6 +4471,29 @@ mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_single_info *nh, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack); + if (err) + return 
err; + + /* Device only nexthops with an IPIP device are programmed as + * encapsulating adjacency entries. + */ + if (!nh->gw_family && !nh->is_reject && + !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway"); + return -EINVAL; + } + + return 0; +} + +static int mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp, const struct nh_notifier_grp_info *nh_grp, struct netlink_ext_ack *extack) @@ -4284,21 +4510,83 @@ mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp, int err; nh = &nh_grp->nh_entries[i].nh; - err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, - extack); + err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh, + extack); if (err) return err; + } - /* Device only nexthops with an IPIP device are programmed as - * encapsulating adjacency entries. - */ - if (!nh->gw_family && !nh->is_reject && - !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) { - NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway"); - return -EINVAL; + return 0; +} + +static int +mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_res_table_info *nh_res_table, + struct netlink_ext_ack *extack) +{ + unsigned int alloc_size; + bool valid_size = false; + int err, i; + + if (nh_res_table->num_nh_buckets < 32) { + NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32"); + return -EINVAL; + } + + for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) { + const struct mlxsw_sp_adj_grp_size_range *size_range; + + size_range = &mlxsw_sp->router->adj_grp_size_ranges[i]; + + if (nh_res_table->num_nh_buckets >= size_range->start && + nh_res_table->num_nh_buckets <= size_range->end) { + valid_size = true; + break; } } + if (!valid_size) { + NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets"); + return -EINVAL; + } + + err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + nh_res_table->num_nh_buckets, + &alloc_size); + if (err || nh_res_table->num_nh_buckets != alloc_size) { + NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition"); + return -EINVAL; + } + + return 0; +} + +static int +mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_res_table_info *nh_res_table, + struct netlink_ext_ack *extack) +{ + int err; + u16 i; + + err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp, + nh_res_table, + extack); + if (err) + return err; + + for (i = 0; i < nh_res_table->num_nh_buckets; i++) { + const struct nh_notifier_single_info *nh; + int err; + + nh = &nh_res_table->nhs[i]; + err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh, + extack); + if (err) + return err; + } + return 0; } @@ -4306,7 +4594,11 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp, unsigned long event, struct nh_notifier_info *info) { - if (event != NEXTHOP_EVENT_REPLACE) + struct nh_notifier_single_info *nh; + + if (event != NEXTHOP_EVENT_REPLACE && + event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE && + event != NEXTHOP_EVENT_BUCKET_REPLACE) return 0; switch (info->type) { @@ -4317,6 +4609,14 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp, info->extack); + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp, + info->nh_res_table, + info->extack); + case 
NH_NOTIFIER_INFO_TYPE_RES_BUCKET: + nh = &info->nh_res_bucket->new_nh; + return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh, + info->extack); default: NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type"); return -EOPNOTSUPP; @@ -4334,6 +4634,7 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp, return info->nh->gw_family || info->nh->is_reject || mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL); case NH_NOTIFIER_INFO_TYPE_GRP: + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: /* Already validated earlier. */ return true; default: @@ -4346,7 +4647,7 @@ static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp, { u16 lb_rif_index = mlxsw_sp->router->lb_rif_index; - nh->discard = 1; + nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD; nh->should_offload = 1; /* While nexthops that discard packets do not forward packets * via an egress RIF, they still need to be programmed using a @@ -4398,6 +4699,15 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp, if (nh_obj->is_reject) mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh); + /* In a resilient nexthop group, all the nexthops must be written to + * the adjacency table. Even if they do not have a valid neighbour or + * RIF. + */ + if (nh_grp->nhgi->is_resilient && !nh->should_offload) { + nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP; + nh->should_offload = 1; + } + return 0; err_type_init: @@ -4409,11 +4719,12 @@ err_type_init: static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { - if (nh->discard) + if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD) mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); list_del(&nh->router_list_node); mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); + nh->should_offload = 0; } static int @@ -4423,6 +4734,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_info *nhgi; struct mlxsw_sp_nexthop *nh; + bool is_resilient = false; unsigned int nhs; int err, i; @@ -4433,6 +4745,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, case NH_NOTIFIER_INFO_TYPE_GRP: nhs = info->nh_grp->num_nh; break; + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + nhs = info->nh_res_table->num_nh_buckets; + is_resilient = true; + break; default: return -EINVAL; } @@ -4443,6 +4759,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, nh_grp->nhgi = nhgi; nhgi->nh_grp = nh_grp; nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info); + nhgi->is_resilient = is_resilient; nhgi->count = nhs; for (i = 0; i < nhgi->count; i++) { struct nh_notifier_single_info *nh_obj; @@ -4458,6 +4775,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, nh_obj = &info->nh_grp->nh_entries[i].nh; weight = info->nh_grp->nh_entries[i].weight; break; + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + nh_obj = &info->nh_res_table->nhs[i]; + weight = 1; + break; default: err = -EINVAL; goto err_nexthop_obj_init; @@ -4473,6 +4794,15 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, goto err_group_refresh; } + /* Add resilient nexthop groups to a list so that the activity of their + * nexthop buckets will be periodically queried and cleared. 
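Not part of the patch: a distilled sketch of the idiom used here -- the activity-polling delayed work is armed only on the empty-to-non-empty transition of the resilient-group list and cancelled once the list drains, while the work item reschedules itself as long as entries remain. Names are hypothetical and the plain system workqueue stands in for mlxsw_core_schedule_dw().

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct act_poller {
	struct mutex lock;
	struct list_head items;		/* resilient groups being polled */
	struct delayed_work dw;
};

static void act_poller_work(struct work_struct *work)
{
	struct act_poller *p = container_of(work, struct act_poller, dw.work);
	bool again;

	mutex_lock(&p->lock);
	/* Query and report hardware activity for every item here. */
	again = !list_empty(&p->items);
	mutex_unlock(&p->lock);

	if (again)
		schedule_delayed_work(&p->dw, msecs_to_jiffies(1000));
}

static void act_poller_add(struct act_poller *p, struct list_head *item)
{
	mutex_lock(&p->lock);
	/* Arm the work only on the empty -> non-empty transition. */
	if (list_empty(&p->items))
		schedule_delayed_work(&p->dw, msecs_to_jiffies(1000));
	list_add(item, &p->items);
	mutex_unlock(&p->lock);
}

static void act_poller_del(struct act_poller *p, struct list_head *item)
{
	mutex_lock(&p->lock);
	list_del(item);
	/* Stop the periodic work once the last item is gone. */
	if (list_empty(&p->items))
		cancel_delayed_work(&p->dw);
	mutex_unlock(&p->lock);
}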
+ */ + if (nhgi->is_resilient) { + if (list_empty(&mlxsw_sp->router->nh_res_grp_list)) + mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp); + list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list); + } + return 0; err_group_refresh: @@ -4491,8 +4821,15 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; + struct mlxsw_sp_router *router = mlxsw_sp->router; int i; + if (nhgi->is_resilient) { + list_del(&nhgi->list); + if (list_empty(&mlxsw_sp->router->nh_res_grp_list)) + cancel_delayed_work(&router->nh_grp_activity_dw); + } + for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -4685,6 +5022,136 @@ static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); } +static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, char *ratr_pl) +{ + MLXSW_REG_ZERO(ratr, ratr_pl); + mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ); + mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index); + mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16); + + return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new) +{ + /* Clear the opcode and activity on both the old and new payload as + * they are irrelevant for the comparison. + */ + mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ); + mlxsw_reg_ratr_a_set(ratr_pl, 0); + mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ); + mlxsw_reg_ratr_a_set(ratr_pl_new, 0); + + /* If the contents of the adjacency entry are consistent with the + * replacement request, then replacement was successful. + */ + if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN)) + return 0; + + return -EINVAL; +} + +static int +mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, + struct nh_notifier_info *info) +{ + u16 bucket_index = info->nh_res_bucket->bucket_index; + struct netlink_ext_ack *extack = info->extack; + bool force = info->nh_res_bucket->force; + char ratr_pl_new[MLXSW_REG_RATR_LEN]; + char ratr_pl[MLXSW_REG_RATR_LEN]; + u32 adj_index; + int err; + + /* No point in trying an atomic replacement if the idle timer interval + * is smaller than the interval in which we query and clear activity. + */ + if (!force && info->nh_res_bucket->idle_timer_ms < + MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL) + force = true; + + adj_index = nh->nhgi->adj_index + bucket_index; + err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket"); + return err; + } + + if (!force) { + err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index, + ratr_pl_new); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. 
State might be inconsistent"); + return err; + } + + err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement"); + return err; + } + } + + nh->update = 0; + nh->offloaded = 1; + mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index); + + return 0; +} + +static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp, + struct nh_notifier_info *info) +{ + u16 bucket_index = info->nh_res_bucket->bucket_index; + struct netlink_ext_ack *extack = info->extack; + struct mlxsw_sp_nexthop_group_info *nhgi; + struct nh_notifier_single_info *nh_obj; + struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_nexthop *nh; + int err; + + nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id); + if (!nh_grp) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found"); + return -EINVAL; + } + + nhgi = nh_grp->nhgi; + + if (bucket_index >= nhgi->count) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range"); + return -EINVAL; + } + + nh = &nhgi->nexthops[bucket_index]; + mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh); + + nh_obj = &info->nh_res_bucket->new_nh; + err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement"); + goto err_nexthop_obj_init; + } + + err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info); + if (err) + goto err_nexthop_obj_bucket_adj_update; + + return 0; + +err_nexthop_obj_bucket_adj_update: + mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh); +err_nexthop_obj_init: + nh_obj = &info->nh_res_bucket->old_nh; + mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1); + /* The old adjacency entry was not overwritten */ + nh->update = 0; + nh->offloaded = 1; + return err; +} + static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -4699,8 +5166,6 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, mutex_lock(&router->lock); - ASSERT_RTNL(); - switch (event) { case NEXTHOP_EVENT_REPLACE: err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info); @@ -4708,6 +5173,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, case NEXTHOP_EVENT_DEL: mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info); break; + case NEXTHOP_EVENT_BUCKET_REPLACE: + err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp, + info); + break; default: break; } @@ -7667,7 +8136,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, int i, err; type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); - ops = mlxsw_sp->rif_ops_arr[type]; + ops = mlxsw_sp->router->rif_ops_arr[type]; vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? 
: RT_TABLE_MAIN, extack); if (IS_ERR(vr)) @@ -8865,7 +9334,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = { .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure, }; -const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = { +static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, @@ -9050,7 +9519,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = { .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure, }; -const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = { +static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, @@ -9302,6 +9771,36 @@ static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index); } +static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp) +{ + size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges); + + mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr; + mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges; + mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count; + + return 0; +} + +const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = { + .init = mlxsw_sp1_router_init, +}; + +static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) +{ + size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges); + + mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges; + mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count; + + return 0; +} + +const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = { + .init = mlxsw_sp2_router_init, +}; + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack) { @@ -9315,6 +9814,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; + err = mlxsw_sp->router_ops->init(mlxsw_sp); + if (err) + goto err_router_ops_init; + err = mlxsw_sp_router_xm_init(mlxsw_sp); if (err) goto err_xm_init; @@ -9328,6 +9831,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_ll_op_ctx_init; + INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list); + INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw, + mlxsw_sp_nh_grp_activity_work); + INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -9451,10 +9958,12 @@ err_ipips_init: err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw); mlxsw_sp_router_ll_op_ctx_fini(router); err_ll_op_ctx_init: mlxsw_sp_router_xm_fini(mlxsw_sp); err_xm_init: +err_router_ops_init: mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); return err; @@ -9481,6 +9990,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); + cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw); mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router); mlxsw_sp_router_xm_fini(mlxsw_sp); mutex_destroy(&mlxsw_sp->router->lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 2875ee8ec537..be7708a375e1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -78,6 +78,10 @@ struct mlxsw_sp_router { struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx; u16 lb_rif_index; struct mlxsw_sp_router_xm *xm; + const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges; + size_t adj_grp_size_ranges_count; + struct delayed_work nh_grp_activity_dw; + struct list_head nh_res_grp_list; }; struct mlxsw_sp_fib_entry_priv { @@ -195,20 +199,20 @@ mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_entry *except); struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, struct mlxsw_sp_nexthop *nh); -bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); +bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh); unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh); int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, u32 *p_adj_size, u32 *p_adj_hash_index); struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); -bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh); #define mlxsw_sp_nexthop_for_each(nh, router) \ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \ nh = mlxsw_sp_nexthop_next(router, nh)) int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, u64 *p_counter); -int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh); +int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl); void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh); void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 1892cea05ee7..3398cc01e5ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -186,6 +186,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. 
*/ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); + mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id); mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); @@ -203,6 +204,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry, int pa_id = span_entry->id; mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type); + mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); } @@ -938,7 +940,8 @@ mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp, if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev && curr->parms.policer_enable == sparms->policer_enable && - curr->parms.policer_id == sparms->policer_id) + curr->parms.policer_id == sparms->policer_id && + curr->parms.session_id == sparms->session_id) return curr; } return NULL; @@ -1085,6 +1088,7 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, sparms.policer_id = parms->policer_id; sparms.policer_enable = parms->policer_enable; + sparms.session_id = parms->session_id; span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); if (!span_entry) return -ENOBUFS; @@ -1227,8 +1231,12 @@ __mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span, return -EINVAL; } + if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX) + return -EINVAL; + mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable, - trigger_entry->parms.span_id); + trigger_entry->parms.span_id, + trigger_entry->parms.probability_rate); return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); } @@ -1362,8 +1370,11 @@ mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry * return -EINVAL; } + if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX) + return -EINVAL; + mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id, - 1); + trigger_entry->parms.probability_rate); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl); } @@ -1561,7 +1572,9 @@ int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp, trigger, mlxsw_sp_port); if (trigger_entry) { - if (trigger_entry->parms.span_id != parms->span_id) + if (trigger_entry->parms.span_id != parms->span_id || + trigger_entry->parms.probability_rate != + parms->probability_rate) return -EINVAL; refcount_inc(&trigger_entry->ref_count); goto out; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index aa1cd409c0e2..efaefd1ae863 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -13,6 +13,19 @@ struct mlxsw_sp; struct mlxsw_sp_port; +/* SPAN session identifiers that correspond to MLXSW_TRAP_ID_MIRROR_SESSION<i> + * trap identifiers. The session identifier is an attribute of the SPAN agent, + * which determines the trap identifier of packets that are mirrored to the + * CPU. Packets that are trapped to the CPU for the same logical reason (e.g., + * buffer drops) should use the same session identifier. + */ +enum mlxsw_sp_span_session_id { + MLXSW_SP_SPAN_SESSION_ID_BUFFER, + MLXSW_SP_SPAN_SESSION_ID_SAMPLING, + + __MLXSW_SP_SPAN_SESSION_ID_MAX = 8, +}; + struct mlxsw_sp_span_parms { struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. 
*/ unsigned int ttl; @@ -23,6 +36,7 @@ struct mlxsw_sp_span_parms { u16 vid; u16 policer_id; bool policer_enable; + enum mlxsw_sp_span_session_id session_id; }; enum mlxsw_sp_span_trigger { @@ -35,12 +49,14 @@ enum mlxsw_sp_span_trigger { struct mlxsw_sp_span_trigger_parms { int span_id; + u32 probability_rate; }; struct mlxsw_sp_span_agent_parms { const struct net_device *to_dev; u16 policer_id; bool policer_enable; + enum mlxsw_sp_span_session_id session_id; }; struct mlxsw_sp_span_entry_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 23b7e8d6386b..eeccd586e781 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -98,6 +98,10 @@ struct mlxsw_sp_bridge_ops { const struct mlxsw_sp_fid *fid); }; +struct mlxsw_sp_switchdev_ops { + void (*init)(struct mlxsw_sp *mlxsw_sp); +}; + static int mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, @@ -2296,7 +2300,7 @@ mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, vid, ETH_P_8021AD, extack); } -static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = { +static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = { .port_join = mlxsw_sp_bridge_8021ad_port_join, .port_leave = mlxsw_sp_bridge_8021ad_port_leave, .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, @@ -2305,6 +2309,53 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = { .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, }; +static int +mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + int err; + + /* The EtherType of decapsulated packets is determined at the egress + * port to allow 802.1d and 802.1ad bridges with VXLAN devices to + * co-exist. 
+ */ + err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD); + if (err) + return err; + + err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port, + mlxsw_sp_port, extack); + if (err) + goto err_bridge_8021ad_port_join; + + return 0; + +err_bridge_8021ad_port_join: + mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q); + return err; +} + +static void +mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port, + mlxsw_sp_port); + mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = { + .port_join = mlxsw_sp2_bridge_8021ad_port_join, + .port_leave = mlxsw_sp2_bridge_8021ad_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, + .fid_get = mlxsw_sp_bridge_8021q_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, +}; + int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev, @@ -2865,7 +2916,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work * return; if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE && - !switchdev_work->fdb_info.added_by_user) + (!switchdev_work->fdb_info.added_by_user || + switchdev_work->fdb_info.is_local)) return; if (!netif_running(dev)) @@ -2920,7 +2972,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); if (err) @@ -3535,6 +3587,24 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); } +static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops; +} + +const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = { + .init = mlxsw_sp1_switchdev_init, +}; + +static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops; +} + +const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = { + .init = mlxsw_sp2_switchdev_init, +}; + int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge; @@ -3549,7 +3619,8 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; - bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops; + + mlxsw_sp->switchdev_ops->init(mlxsw_sp); return mlxsw_sp_fdb_init(mlxsw_sp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 4ef12e3e021a..26d01adbedad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -49,8 +49,14 @@ enum { #define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT enum { + /* Packet was mirrored from ingress. */ + MLXSW_SP_MIRROR_REASON_INGRESS = 1, + /* Packet was mirrored from policy engine. */ + MLXSW_SP_MIRROR_REASON_POLICY_ENGINE = 2, /* Packet was early dropped. 
*/ MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9, + /* Packet was mirrored from egress. */ + MLXSW_SP_MIRROR_REASON_EGRESS = 14, }; static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, @@ -106,7 +112,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port, void *trap_ctx) { - u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index; + u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index; const struct flow_action_cookie *fa_cookie; struct devlink_port *in_devlink_port; struct mlxsw_sp_port *mlxsw_sp_port; @@ -202,21 +208,175 @@ static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port, mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port); } +static struct mlxsw_sp_port * +mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_rx_md_info *rx_md_info) +{ + u8 local_port; + + if (!rx_md_info->tx_port_valid) + return NULL; + + if (rx_md_info->tx_port_is_lag) + local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, + rx_md_info->tx_lag_id, + rx_md_info->tx_lag_port_index); + else + local_port = rx_md_info->tx_sys_port; + + if (local_port >= mlxsw_core_max_ports(mlxsw_sp->core)) + return NULL; + + return mlxsw_sp->ports[local_port]; +} + +/* The latency units are determined according to MOGCR.mirror_latency_units. It + * defaults to 64 nanoseconds. + */ +#define MLXSW_SP_MIRROR_LATENCY_SHIFT 6 + +static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp, + struct psample_metadata *md, + struct sk_buff *skb, int in_ifindex, + bool truncate, u32 trunc_size) +{ + struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info; + struct mlxsw_sp_port *mlxsw_sp_port; + + md->trunc_size = truncate ? trunc_size : skb->len; + md->in_ifindex = in_ifindex; + mlxsw_sp_port = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info); + md->out_ifindex = mlxsw_sp_port && mlxsw_sp_port->dev ? + mlxsw_sp_port->dev->ifindex : 0; + md->out_tc_valid = rx_md_info->tx_tc_valid; + md->out_tc = rx_md_info->tx_tc; + md->out_tc_occ_valid = rx_md_info->tx_congestion_valid; + md->out_tc_occ = rx_md_info->tx_congestion; + md->latency_valid = rx_md_info->latency_valid; + md->latency = rx_md_info->latency; + md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT; +} + static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + struct mlxsw_sp_sample_trigger trigger; + struct mlxsw_sp_sample_params *params; + struct mlxsw_sp_port *mlxsw_sp_port; + struct psample_metadata md = {}; + int err; + + err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); + if (err) + return; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) + goto out; + + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS; + trigger.local_port = local_port; + params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger); + if (!params) + goto out; + + /* The psample module expects skb->data to point to the start of the + * Ethernet header. 
+ */ + skb_push(skb, ETH_HLEN); + mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb, + mlxsw_sp_port->dev->ifindex, params->truncate, + params->trunc_size); + psample_sample_packet(params->psample_group, skb, params->rate, &md); +out: + consume_skb(skb); +} + +static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx) +{ + struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info; + struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + struct mlxsw_sp_port *mlxsw_sp_port, *mlxsw_sp_port_tx; + struct mlxsw_sp_sample_trigger trigger; + struct mlxsw_sp_sample_params *params; + struct psample_metadata md = {}; + int err; + + /* Locally generated packets are not reported from the policy engine + * trigger, so do not report them from the egress trigger as well. + */ + if (local_port == MLXSW_PORT_CPU_PORT) + goto out; + + err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); + if (err) + return; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) + goto out; + + /* Packet was sampled from Tx, so we need to retrieve the sample + * parameters based on the Tx port and not the Rx port. + */ + mlxsw_sp_port_tx = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info); + if (!mlxsw_sp_port_tx) + goto out; + + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS; + trigger.local_port = mlxsw_sp_port_tx->local_port; + params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger); + if (!params) + goto out; + + /* The psample module expects skb->data to point to the start of the + * Ethernet header. + */ + skb_push(skb, ETH_HLEN); + mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb, + mlxsw_sp_port->dev->ifindex, params->truncate, + params->trunc_size); + psample_sample_packet(params->psample_group, skb, params->rate, &md); +out: + consume_skb(skb); +} + +static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx) +{ + struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + struct mlxsw_sp_sample_trigger trigger = { + .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, + }; + struct mlxsw_sp_sample_params *params; + struct mlxsw_sp_port *mlxsw_sp_port; + struct psample_metadata md = {}; int err; err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); if (err) return; - /* The sample handler expects skb->data to point to the start of the + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) + goto out; + + params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger); + if (!params) + goto out; + + /* The psample module expects skb->data to point to the start of the * Ethernet header. 
*/ skb_push(skb, ETH_HLEN); - mlxsw_sp_sample_receive(mlxsw_sp, skb, local_port); + mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb, + mlxsw_sp_port->dev->ifindex, params->truncate, + params->trunc_size); + psample_sample_packet(params->psample_group, skb, params->rate, &md); +out: + consume_skb(skb); } #define MLXSW_SP_TRAP_DROP(_id, _group_id) \ @@ -464,11 +624,6 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { .priority = 2, }, { - .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), - .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, - .priority = 0, - }, - { .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING, .priority = 4, @@ -993,14 +1148,6 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, }, { - .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE, - MIRROR), - .listeners_arr = { - MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE, - MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD), - }, - }, - { .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP), .listeners_arr = { MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU, @@ -1709,10 +1856,23 @@ int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id, static const struct mlxsw_sp_trap_group_item mlxsw_sp1_trap_group_items_arr[] = { + { + .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, + .priority = 0, + }, }; static const struct mlxsw_sp_trap_item mlxsw_sp1_trap_items_arr[] = { + { + .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE, + MIRROR), + .listeners_arr = { + MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE, + MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD), + }, + }, }; static int @@ -1749,6 +1909,12 @@ mlxsw_sp2_trap_group_items_arr[] = { .priority = 0, .fixed_policer = true, }, + { + .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, + .priority = 0, + .fixed_policer = true, + }, }; static const struct mlxsw_sp_trap_item @@ -1760,6 +1926,21 @@ mlxsw_sp2_trap_items_arr[] = { }, .is_source = true, }, + { + .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE, + MIRROR), + .listeners_arr = { + MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_listener, 1, + SP_PKT_SAMPLE, + MLXSW_SP_MIRROR_REASON_INGRESS), + MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_tx_listener, 1, + SP_PKT_SAMPLE, + MLXSW_SP_MIRROR_REASON_EGRESS), + MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_acl_listener, 1, + SP_PKT_SAMPLE, + MLXSW_SP_MIRROR_REASON_POLICY_ENGINE), + }, + }, }; static int diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 2feed6ce19d3..13eef6e9bd2d 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -193,11 +193,10 @@ static void ks8851_read_mac_addr(struct net_device *dev) static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) { struct net_device *dev = ks->netdev; - const u8 *mac_addr; + int ret; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(dev->dev_addr, mac_addr); + ret = of_get_mac_address(np, dev->dev_addr); + if (!ret) { ks8851_write_mac_addr(dev); return; } diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 2c0dcd7acf3f..3658c4ae3c37 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ 
-222,7 +222,6 @@ static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv) unsigned long timeout = jiffies + msecs_to_jiffies(2000); u16 phstat1; u16 estat; - int ret = 0; phstat1 = encx24j600_read_phy(priv, PHSTAT1); while ((phstat1 & ANDONE) == 0) { @@ -258,7 +257,7 @@ static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv) encx24j600_write_reg(priv, MACLCON, 0x370f); } - return ret; + return 0; } /* Access the PHY to determine link status */ @@ -1118,17 +1117,7 @@ static struct spi_driver encx24j600_spi_net_driver = { .id_table = encx24j600_spi_id_table, }; -static int __init encx24j600_init(void) -{ - return spi_register_driver(&encx24j600_spi_net_driver); -} -module_init(encx24j600_init); - -static void encx24j600_exit(void) -{ - spi_unregister_driver(&encx24j600_spi_net_driver); -} -module_exit(encx24j600_exit); +module_spi_driver(encx24j600_spi_net_driver); MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>"); diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index c5de8f46cdd3..91a755efe2e6 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -730,8 +730,8 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev, static int lan743x_ethtool_set_eee(struct net_device *netdev, struct ethtool_eee *eee) { - struct lan743x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = NULL; + struct lan743x_adapter *adapter; + struct phy_device *phydev; u32 buf = 0; int ret = 0; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 7b6794aa8ea9..dae10328c6cf 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2771,7 +2771,6 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, { struct lan743x_adapter *adapter = NULL; struct net_device *netdev = NULL; - const void *mac_addr; int ret = -ENODEV; netdev = devm_alloc_etherdev(&pdev->dev, @@ -2788,9 +2787,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; - mac_addr = of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(adapter->mac_address, mac_addr); + of_get_mac_address(pdev->dev.of_node, adapter->mac_address); ret = lan743x_pci_init(adapter, pdev); if (ret) @@ -3004,7 +3001,7 @@ static int lan743x_pm_suspend(struct device *dev) lan743x_pm_set_wol(adapter); /* Host sets PME_En, put D3hot */ - return pci_prepare_to_sleep(pdev);; + return pci_prepare_to_sleep(pdev); } static int lan743x_pm_resume(struct device *dev) diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig new file mode 100644 index 000000000000..fe4e7a7d9c0b --- /dev/null +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -0,0 +1,29 @@ +# +# Microsoft Azure network device configuration +# + +config NET_VENDOR_MICROSOFT + bool "Microsoft Network Devices" + default y + help + If you have a network (Ethernet) device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip the + question about Microsoft network devices. If you say Y, you will be + asked for your specific device in the following question. 
+ +if NET_VENDOR_MICROSOFT + +config MICROSOFT_MANA + tristate "Microsoft Azure Network Adapter (MANA) support" + depends on PCI_MSI && X86_64 + depends on PCI_HYPERV + help + This driver supports Microsoft Azure Network Adapter (MANA). + So far, the driver is only supported on X86_64. + + To compile this driver as a module, choose M here. + The module will be called mana. + +endif #NET_VENDOR_MICROSOFT diff --git a/drivers/net/ethernet/microsoft/Makefile b/drivers/net/ethernet/microsoft/Makefile new file mode 100644 index 000000000000..d2ddc218135f --- /dev/null +++ b/drivers/net/ethernet/microsoft/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Microsoft Azure network device driver. +# + +obj-$(CONFIG_MICROSOFT_MANA) += mana/ diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile new file mode 100644 index 000000000000..0edd5bb685f3 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# Makefile for the Microsoft Azure Network Adapter driver + +obj-$(CONFIG_MICROSOFT_MANA) += mana.o +mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h new file mode 100644 index 000000000000..33e53d32e891 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/gdma.h @@ -0,0 +1,673 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _GDMA_H +#define _GDMA_H + +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> + +#include "shm_channel.h" + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +enum gdma_request_type { + GDMA_VERIFY_VF_DRIVER_VERSION = 1, + GDMA_QUERY_MAX_RESOURCES = 2, + GDMA_LIST_DEVICES = 3, + GDMA_REGISTER_DEVICE = 4, + GDMA_DEREGISTER_DEVICE = 5, + GDMA_GENERATE_TEST_EQE = 10, + GDMA_CREATE_QUEUE = 12, + GDMA_DISABLE_QUEUE = 13, + GDMA_CREATE_DMA_REGION = 25, + GDMA_DMA_REGION_ADD_PAGES = 26, + GDMA_DESTROY_DMA_REGION = 27, +}; + +enum gdma_queue_type { + GDMA_INVALID_QUEUE, + GDMA_SQ, + GDMA_RQ, + GDMA_CQ, + GDMA_EQ, +}; + +enum gdma_work_request_flags { + GDMA_WR_NONE = 0, + GDMA_WR_OOB_IN_SGL = BIT(0), + GDMA_WR_PAD_BY_SGE0 = BIT(1), +}; + +enum gdma_eqe_type { + GDMA_EQE_COMPLETION = 3, + GDMA_EQE_TEST_EVENT = 64, + GDMA_EQE_HWC_INIT_EQ_ID_DB = 129, + GDMA_EQE_HWC_INIT_DATA = 130, + GDMA_EQE_HWC_INIT_DONE = 131, +}; + +enum { + GDMA_DEVICE_NONE = 0, + GDMA_DEVICE_HWC = 1, + GDMA_DEVICE_MANA = 2, +}; + +struct gdma_resource { + /* Protect the bitmap */ + spinlock_t lock; + + /* The bitmap size in bits. */ + u32 size; + + /* The bitmap tracks the resources. 
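Not part of the patch: the spinlock-protected bitmap declared in struct gdma_resource is used as a small id allocator (gdma_main.c later uses it this way for MSI-X indexes). A minimal sketch of one allocation, with a hypothetical helper name, assuming gdma.h plus <linux/bitmap.h> and <linux/spinlock.h>:

static int example_res_alloc(struct gdma_resource *r, unsigned int *id)
{
	unsigned long flags;
	unsigned long bit;
	int err = 0;

	spin_lock_irqsave(&r->lock, flags);
	bit = find_first_zero_bit(r->map, r->size);
	if (bit >= r->size)
		err = -ENOSPC;		/* All ids are in use. */
	else
		bitmap_set(r->map, bit, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	if (!err)
		*id = bit;
	return err;
}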
*/ + unsigned long *map; +}; + +union gdma_doorbell_entry { + u64 as_uint64; + + struct { + u64 id : 24; + u64 reserved : 8; + u64 tail_ptr : 31; + u64 arm : 1; + } cq; + + struct { + u64 id : 24; + u64 wqe_cnt : 8; + u64 tail_ptr : 32; + } rq; + + struct { + u64 id : 24; + u64 reserved : 8; + u64 tail_ptr : 32; + } sq; + + struct { + u64 id : 16; + u64 reserved : 16; + u64 tail_ptr : 31; + u64 arm : 1; + } eq; +}; /* HW DATA */ + +struct gdma_msg_hdr { + u32 hdr_type; + u32 msg_type; + u16 msg_version; + u16 hwc_msg_id; + u32 msg_size; +}; /* HW DATA */ + +struct gdma_dev_id { + union { + struct { + u16 type; + u16 instance; + }; + + u32 as_uint32; + }; +}; /* HW DATA */ + +struct gdma_req_hdr { + struct gdma_msg_hdr req; + struct gdma_msg_hdr resp; /* The expected response */ + struct gdma_dev_id dev_id; + u32 activity_id; +}; /* HW DATA */ + +struct gdma_resp_hdr { + struct gdma_msg_hdr response; + struct gdma_dev_id dev_id; + u32 activity_id; + u32 status; + u32 reserved; +}; /* HW DATA */ + +struct gdma_general_req { + struct gdma_req_hdr hdr; +}; /* HW DATA */ + +#define GDMA_MESSAGE_V1 1 + +struct gdma_general_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +#define GDMA_STANDARD_HEADER_TYPE 0 + +static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code, + u32 req_size, u32 resp_size) +{ + hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE; + hdr->req.msg_type = code; + hdr->req.msg_version = GDMA_MESSAGE_V1; + hdr->req.msg_size = req_size; + + hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE; + hdr->resp.msg_type = code; + hdr->resp.msg_version = GDMA_MESSAGE_V1; + hdr->resp.msg_size = resp_size; +} + +/* The 16-byte struct is part of the GDMA work queue entry (WQE). */ +struct gdma_sge { + u64 address; + u32 mem_key; + u32 size; +}; /* HW DATA */ + +struct gdma_wqe_request { + struct gdma_sge *sgl; + u32 num_sge; + + u32 inline_oob_size; + const void *inline_oob_data; + + u32 flags; + u32 client_data_unit; +}; + +enum gdma_page_type { + GDMA_PAGE_TYPE_4K, +}; + +#define GDMA_INVALID_DMA_REGION 0 + +struct gdma_mem_info { + struct device *dev; + + dma_addr_t dma_handle; + void *virt_addr; + u64 length; + + /* Allocated by the PF driver */ + u64 gdma_region; +}; + +#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 + +struct gdma_dev { + struct gdma_context *gdma_context; + + struct gdma_dev_id dev_id; + + u32 pdid; + u32 doorbell; + u32 gpa_mkey; + + /* GDMA driver specific pointer */ + void *driver_data; +}; + +#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE + +#define GDMA_CQE_SIZE 64 +#define GDMA_EQE_SIZE 16 +#define GDMA_MAX_SQE_SIZE 512 +#define GDMA_MAX_RQE_SIZE 256 + +#define GDMA_COMP_DATA_SIZE 0x3C + +#define GDMA_EVENT_DATA_SIZE 0xC + +/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */ +#define GDMA_WQE_BU_SIZE 32 + +#define INVALID_PDID UINT_MAX +#define INVALID_DOORBELL UINT_MAX +#define INVALID_MEM_KEY UINT_MAX +#define INVALID_QUEUE_ID UINT_MAX +#define INVALID_PCI_MSIX_INDEX UINT_MAX + +struct gdma_comp { + u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; + u32 wq_num; + bool is_sq; +}; + +struct gdma_event { + u32 details[GDMA_EVENT_DATA_SIZE / 4]; + u8 type; +}; + +struct gdma_queue; + +#define CQE_POLLING_BUFFER 512 +struct mana_eq { + struct gdma_queue *eq; + struct gdma_comp cqe_poll[CQE_POLLING_BUFFER]; +}; + +typedef void gdma_eq_callback(void *context, struct gdma_queue *q, + struct gdma_event *e); + +typedef void gdma_cq_callback(void *context, struct gdma_queue *q); + +/* The 'head' is the producer index. 
For SQ/RQ, when the driver posts a WQE + * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the + * driver increases the 'head' in BUs rather than in bytes, and notifies + * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track + * the HW head, and increases the 'head' by 1 for every processed EQE/CQE. + * + * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is + * processed, the driver increases the 'tail' to indicate that WQEs have + * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ. + * + * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures + * that the EQ/CQ is big enough so they can't overflow, and the driver uses + * the owner bits mechanism to detect if the queue has become empty. + */ +struct gdma_queue { + struct gdma_dev *gdma_dev; + + enum gdma_queue_type type; + u32 id; + + struct gdma_mem_info mem_info; + + void *queue_mem_ptr; + u32 queue_size; + + bool monitor_avl_buf; + + u32 head; + u32 tail; + + /* Extra fields specific to EQ/CQ. */ + union { + struct { + bool disable_needed; + + gdma_eq_callback *callback; + void *context; + + unsigned int msix_index; + + u32 log2_throttle_limit; + + /* NAPI data */ + struct napi_struct napi; + int work_done; + int budget; + } eq; + + struct { + gdma_cq_callback *callback; + void *context; + + struct gdma_queue *parent; /* For CQ/EQ relationship */ + } cq; + }; +}; + +struct gdma_queue_spec { + enum gdma_queue_type type; + bool monitor_avl_buf; + unsigned int queue_size; + + /* Extra fields specific to EQ/CQ. */ + union { + struct { + gdma_eq_callback *callback; + void *context; + + unsigned long log2_throttle_limit; + + /* Only used by the MANA device. */ + struct net_device *ndev; + } eq; + + struct { + gdma_cq_callback *callback; + void *context; + + struct gdma_queue *parent_eq; + + } cq; + }; +}; + +struct gdma_irq_context { + void (*handler)(void *arg); + void *arg; +}; + +struct gdma_context { + struct device *dev; + + /* Per-vPort max number of queues */ + unsigned int max_num_queues; + unsigned int max_num_msix; + unsigned int num_msix_usable; + struct gdma_resource msix_resource; + struct gdma_irq_context *irq_contexts; + + /* This maps a CQ index to the queue structure. 
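Not part of the patch: the owner-bits mechanism described in the struct gdma_queue comment above can be distilled into a small helper that classifies the EQE at the current head, assuming the queue holds num_eqe entries and using GDMA_EQE_OWNER_MASK defined further down in this header. It mirrors the checks performed later in mana_gd_process_eq_events() in gdma_main.c.

/* Returns 1 for a fresh EQE, 0 if the device has not written this slot yet,
 * and -1 if the queue wrapped more than once (entries were missed).
 */
static int example_eqe_state(u32 head, u32 num_eqe, u32 owner_bits)
{
	u32 old_bits = (head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
	u32 new_bits = (head / num_eqe) & GDMA_EQE_OWNER_MASK;

	if (owner_bits == old_bits)
		return 0;	/* Queue is empty at this position. */
	if (owner_bits != new_bits)
		return -1;	/* Overflow: owner bits skipped a generation. */
	return 1;		/* Valid new entry, safe to process. */
}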
*/ + unsigned int max_num_cqs; + struct gdma_queue **cq_table; + + /* Protect eq_test_event and test_event_eq_id */ + struct mutex eq_test_event_mutex; + struct completion eq_test_event; + u32 test_event_eq_id; + + void __iomem *bar0_va; + void __iomem *shm_base; + void __iomem *db_page_base; + u32 db_page_size; + + /* Shared memory chanenl (used to bootstrap HWC) */ + struct shm_channel shm_channel; + + /* Hardware communication channel (HWC) */ + struct gdma_dev hwc; + + /* Azure network adapter */ + struct gdma_dev mana; +}; + +#define MAX_NUM_GDMA_DEVICES 4 + +static inline bool mana_gd_is_mana(struct gdma_dev *gd) +{ + return gd->dev_id.type == GDMA_DEVICE_MANA; +} + +static inline bool mana_gd_is_hwc(struct gdma_dev *gd) +{ + return gd->dev_id.type == GDMA_DEVICE_HWC; +} + +u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset); +u32 mana_gd_wq_avail_space(struct gdma_queue *wq); + +int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq); + +int mana_gd_create_hwc_queue(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +int mana_gd_create_mana_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); + +int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); + +void mana_gd_arm_cq(struct gdma_queue *cq); + +struct gdma_wqe { + u32 reserved :24; + u32 last_vbytes :8; + + union { + u32 flags; + + struct { + u32 num_sge :8; + u32 inline_oob_size_div4:3; + u32 client_oob_in_sgl :1; + u32 reserved1 :4; + u32 client_data_unit :14; + u32 reserved2 :2; + }; + }; +}; /* HW DATA */ + +#define INLINE_OOB_SMALL_SIZE 8 +#define INLINE_OOB_LARGE_SIZE 24 + +#define MAX_TX_WQE_SIZE 512 +#define MAX_RX_WQE_SIZE 256 + +struct gdma_cqe { + u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; + + union { + u32 as_uint32; + + struct { + u32 wq_num : 24; + u32 is_sq : 1; + u32 reserved : 4; + u32 owner_bits : 3; + }; + } cqe_info; +}; /* HW DATA */ + +#define GDMA_CQE_OWNER_BITS 3 + +#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1) + +#define SET_ARM_BIT 1 + +#define GDMA_EQE_OWNER_BITS 3 + +union gdma_eqe_info { + u32 as_uint32; + + struct { + u32 type : 8; + u32 reserved1 : 8; + u32 client_id : 2; + u32 reserved2 : 11; + u32 owner_bits : 3; + }; +}; /* HW DATA */ + +#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1) +#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries)) + +struct gdma_eqe { + u32 details[GDMA_EVENT_DATA_SIZE / 4]; + u32 eqe_info; +}; /* HW DATA */ + +#define GDMA_REG_DB_PAGE_OFFSET 8 +#define GDMA_REG_DB_PAGE_SIZE 0x10 +#define GDMA_REG_SHM_OFFSET 0x18 + +struct gdma_posted_wqe_info { + u32 wqe_size_in_bu; +}; + +/* GDMA_GENERATE_TEST_EQE */ +struct gdma_generate_test_event_req { + struct gdma_req_hdr hdr; + u32 queue_index; +}; /* HW DATA */ + +/* GDMA_VERIFY_VF_DRIVER_VERSION */ +enum { + GDMA_PROTOCOL_V1 = 1, + GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1, + GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, +}; + +struct gdma_verify_ver_req { + struct gdma_req_hdr hdr; + + /* Mandatory fields required for protocol establishment */ + u64 protocol_ver_min; + u64 protocol_ver_max; + u64 drv_cap_flags1; + u64 drv_cap_flags2; + u64 drv_cap_flags3; + u64 drv_cap_flags4; + + /* Advisory fields */ + u64 drv_ver; + u32 os_type; /* Linux = 0x10; 
Windows = 0x20; Other = 0x30 */ + u32 reserved; + u32 os_ver_major; + u32 os_ver_minor; + u32 os_ver_build; + u32 os_ver_platform; + u64 reserved_2; + u8 os_ver_str1[128]; + u8 os_ver_str2[128]; + u8 os_ver_str3[128]; + u8 os_ver_str4[128]; +}; /* HW DATA */ + +struct gdma_verify_ver_resp { + struct gdma_resp_hdr hdr; + u64 gdma_protocol_ver; + u64 pf_cap_flags1; + u64 pf_cap_flags2; + u64 pf_cap_flags3; + u64 pf_cap_flags4; +}; /* HW DATA */ + +/* GDMA_QUERY_MAX_RESOURCES */ +struct gdma_query_max_resources_resp { + struct gdma_resp_hdr hdr; + u32 status; + u32 max_sq; + u32 max_rq; + u32 max_cq; + u32 max_eq; + u32 max_db; + u32 max_mst; + u32 max_cq_mod_ctx; + u32 max_mod_cq; + u32 max_msix; +}; /* HW DATA */ + +/* GDMA_LIST_DEVICES */ +struct gdma_list_devices_resp { + struct gdma_resp_hdr hdr; + u32 num_of_devs; + u32 reserved; + struct gdma_dev_id devs[64]; +}; /* HW DATA */ + +/* GDMA_REGISTER_DEVICE */ +struct gdma_register_device_resp { + struct gdma_resp_hdr hdr; + u32 pdid; + u32 gpa_mkey; + u32 db_id; +}; /* HW DATA */ + +/* GDMA_CREATE_QUEUE */ +struct gdma_create_queue_req { + struct gdma_req_hdr hdr; + u32 type; + u32 reserved1; + u32 pdid; + u32 doolbell_id; + u64 gdma_region; + u32 reserved2; + u32 queue_size; + u32 log2_throttle_limit; + u32 eq_pci_msix_index; + u32 cq_mod_ctx_id; + u32 cq_parent_eq_id; + u8 rq_drop_on_overrun; + u8 rq_err_on_wqe_overflow; + u8 rq_chain_rec_wqes; + u8 sq_hw_db; + u32 reserved3; +}; /* HW DATA */ + +struct gdma_create_queue_resp { + struct gdma_resp_hdr hdr; + u32 queue_index; +}; /* HW DATA */ + +/* GDMA_DISABLE_QUEUE */ +struct gdma_disable_queue_req { + struct gdma_req_hdr hdr; + u32 type; + u32 queue_index; + u32 alloc_res_id_on_creation; +}; /* HW DATA */ + +/* GDMA_CREATE_DMA_REGION */ +struct gdma_create_dma_region_req { + struct gdma_req_hdr hdr; + + /* The total size of the DMA region */ + u64 length; + + /* The offset in the first page */ + u32 offset_in_page; + + /* enum gdma_page_type */ + u32 gdma_page_type; + + /* The total number of pages */ + u32 page_count; + + /* If page_addr_list_len is smaller than page_count, + * the remaining page addresses will be added via the + * message GDMA_DMA_REGION_ADD_PAGES. 
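Not part of the patch: a sketch of how the tail of a long page list could be chunked into GDMA_DMA_REGION_ADD_PAGES messages using the structures and helpers declared in this header. The per-message limit (pages_per_msg) and the helper name are assumptions for illustration, and the generic response header stands in for whatever the PF actually returns; assumes gdma.h and <linux/slab.h>.

static int example_add_remaining_pages(struct gdma_context *gc, u64 gdma_region,
					const u64 *pages, u32 first, u32 count,
					u32 pages_per_msg)
{
	struct gdma_dma_region_add_pages_req *req;
	struct gdma_general_resp resp = {};
	u32 req_size, n;
	int err;

	while (count) {
		n = min(count, pages_per_msg);
		req_size = struct_size(req, page_addr_list, n);
		req = kzalloc(req_size, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		mana_gd_init_req_hdr(&req->hdr, GDMA_DMA_REGION_ADD_PAGES,
				     req_size, sizeof(resp));
		req->gdma_region = gdma_region;
		req->page_addr_list_len = n;
		memcpy(req->page_addr_list, &pages[first], n * sizeof(u64));

		err = mana_gd_send_request(gc, req_size, req, sizeof(resp),
					   &resp);
		kfree(req);
		if (err || resp.hdr.status)
			return err ? err : -EPROTO;

		first += n;
		count -= n;
	}

	return 0;
}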
+ */ + u32 page_addr_list_len; + u64 page_addr_list[]; +}; /* HW DATA */ + +struct gdma_create_dma_region_resp { + struct gdma_resp_hdr hdr; + u64 gdma_region; +}; /* HW DATA */ + +/* GDMA_DMA_REGION_ADD_PAGES */ +struct gdma_dma_region_add_pages_req { + struct gdma_req_hdr hdr; + + u64 gdma_region; + + u32 page_addr_list_len; + u32 reserved3; + + u64 page_addr_list[]; +}; /* HW DATA */ + +/* GDMA_DESTROY_DMA_REGION */ +struct gdma_destroy_dma_region_req { + struct gdma_req_hdr hdr; + + u64 gdma_region; +}; /* HW DATA */ + +int mana_gd_verify_vf_version(struct pci_dev *pdev); + +int mana_gd_register_device(struct gdma_dev *gd); +int mana_gd_deregister_device(struct gdma_dev *gd); + +int mana_gd_post_work_request(struct gdma_queue *wq, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info); + +int mana_gd_post_and_ring(struct gdma_queue *queue, + const struct gdma_wqe_request *wqe, + struct gdma_posted_wqe_info *wqe_info); + +int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r); +void mana_gd_free_res_map(struct gdma_resource *r); + +void mana_gd_wq_ring_doorbell(struct gdma_context *gc, + struct gdma_queue *queue); + +int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, + struct gdma_mem_info *gmi); + +void mana_gd_free_memory(struct gdma_mem_info *gmi); + +int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, + u32 resp_len, void *resp); +#endif /* _GDMA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c new file mode 100644 index 000000000000..2f87bf90f8ec --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "mana.h" + +static u32 mana_gd_r32(struct gdma_context *g, u64 offset) +{ + return readl(g->bar0_va + offset); +} + +static u64 mana_gd_r64(struct gdma_context *g, u64 offset) +{ + return readq(g->bar0_va + offset); +} + +static void mana_gd_init_registers(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF; + + gc->db_page_base = gc->bar0_va + + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); + + gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET); +} + +static int mana_gd_query_max_resources(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_query_max_resources_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES, + sizeof(req), sizeof(resp)); + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n", + err, resp.hdr.status); + return err ? 
err : -EPROTO; + } + + if (gc->num_msix_usable > resp.max_msix) + gc->num_msix_usable = resp.max_msix; + + if (gc->num_msix_usable <= 1) + return -ENOSPC; + + gc->max_num_queues = num_online_cpus(); + if (gc->max_num_queues > MANA_MAX_NUM_QUEUES) + gc->max_num_queues = MANA_MAX_NUM_QUEUES; + + if (gc->max_num_queues > resp.max_eq) + gc->max_num_queues = resp.max_eq; + + if (gc->max_num_queues > resp.max_cq) + gc->max_num_queues = resp.max_cq; + + if (gc->max_num_queues > resp.max_sq) + gc->max_num_queues = resp.max_sq; + + if (gc->max_num_queues > resp.max_rq) + gc->max_num_queues = resp.max_rq; + + return 0; +} + +static int mana_gd_detect_devices(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_list_devices_resp resp = {}; + struct gdma_general_req req = {}; + struct gdma_dev_id dev; + u32 i, max_num_devs; + u16 dev_type; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req), + sizeof(resp)); + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err, + resp.hdr.status); + return err ? err : -EPROTO; + } + + max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs); + + for (i = 0; i < max_num_devs; i++) { + dev = resp.devs[i]; + dev_type = dev.type; + + /* HWC is already detected in mana_hwc_create_channel(). */ + if (dev_type == GDMA_DEVICE_HWC) + continue; + + if (dev_type == GDMA_DEVICE_MANA) { + gc->mana.gdma_context = gc; + gc->mana.dev_id = dev; + } + } + + return gc->mana.dev_id.type == 0 ? -ENODEV : 0; +} + +int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, + u32 resp_len, void *resp) +{ + struct hw_channel_context *hwc = gc->hwc.driver_data; + + return mana_hwc_send_request(hwc, req_len, req, resp_len, resp); +} + +int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, + struct gdma_mem_info *gmi) +{ + dma_addr_t dma_handle; + void *buf; + + if (length < PAGE_SIZE || !is_power_of_2(length)) + return -EINVAL; + + gmi->dev = gc->dev; + buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + gmi->dma_handle = dma_handle; + gmi->virt_addr = buf; + gmi->length = length; + + return 0; +} + +void mana_gd_free_memory(struct gdma_mem_info *gmi) +{ + dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr, + gmi->dma_handle); +} + +static int mana_gd_create_hw_eq(struct gdma_context *gc, + struct gdma_queue *queue) +{ + struct gdma_create_queue_resp resp = {}; + struct gdma_create_queue_req req = {}; + int err; + + if (queue->type != GDMA_EQ) + return -EINVAL; + + mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE, + sizeof(req), sizeof(resp)); + + req.hdr.dev_id = queue->gdma_dev->dev_id; + req.type = queue->type; + req.pdid = queue->gdma_dev->pdid; + req.doolbell_id = queue->gdma_dev->doorbell; + req.gdma_region = queue->mem_info.gdma_region; + req.queue_size = queue->queue_size; + req.log2_throttle_limit = queue->eq.log2_throttle_limit; + req.eq_pci_msix_index = queue->eq.msix_index; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err, + resp.hdr.status); + return err ? 
err : -EPROTO; + } + + queue->id = resp.queue_index; + queue->eq.disable_needed = true; + queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + return 0; +} + +static int mana_gd_disable_queue(struct gdma_queue *queue) +{ + struct gdma_context *gc = queue->gdma_dev->gdma_context; + struct gdma_disable_queue_req req = {}; + struct gdma_general_resp resp = {}; + int err; + + WARN_ON(queue->type != GDMA_EQ); + + mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE, + sizeof(req), sizeof(resp)); + + req.hdr.dev_id = queue->gdma_dev->dev_id; + req.type = queue->type; + req.queue_index = queue->id; + req.alloc_res_id_on_creation = 1; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err, + resp.hdr.status); + return err ? err : -EPROTO; + } + + return 0; +} + +#define DOORBELL_OFFSET_SQ 0x0 +#define DOORBELL_OFFSET_RQ 0x400 +#define DOORBELL_OFFSET_CQ 0x800 +#define DOORBELL_OFFSET_EQ 0xFF8 + +static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index, + enum gdma_queue_type q_type, u32 qid, + u32 tail_ptr, u8 num_req) +{ + void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index; + union gdma_doorbell_entry e = {}; + + switch (q_type) { + case GDMA_EQ: + e.eq.id = qid; + e.eq.tail_ptr = tail_ptr; + e.eq.arm = num_req; + + addr += DOORBELL_OFFSET_EQ; + break; + + case GDMA_CQ: + e.cq.id = qid; + e.cq.tail_ptr = tail_ptr; + e.cq.arm = num_req; + + addr += DOORBELL_OFFSET_CQ; + break; + + case GDMA_RQ: + e.rq.id = qid; + e.rq.tail_ptr = tail_ptr; + e.rq.wqe_cnt = num_req; + + addr += DOORBELL_OFFSET_RQ; + break; + + case GDMA_SQ: + e.sq.id = qid; + e.sq.tail_ptr = tail_ptr; + + addr += DOORBELL_OFFSET_SQ; + break; + + default: + WARN_ON(1); + return; + } + + /* Ensure all writes are done before ring doorbell */ + wmb(); + + writeq(e.as_uint64, addr); +} + +void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) +{ + mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type, + queue->id, queue->head * GDMA_WQE_BU_SIZE, 1); +} + +void mana_gd_arm_cq(struct gdma_queue *cq) +{ + struct gdma_context *gc = cq->gdma_dev->gdma_context; + + u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE; + + u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS); + + mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id, + head, SET_ARM_BIT); +} + +static void mana_gd_process_eqe(struct gdma_queue *eq) +{ + u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE); + struct gdma_context *gc = eq->gdma_dev->gdma_context; + struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr; + union gdma_eqe_info eqe_info; + enum gdma_eqe_type type; + struct gdma_event event; + struct gdma_queue *cq; + struct gdma_eqe *eqe; + u32 cq_id; + + eqe = &eq_eqe_ptr[head]; + eqe_info.as_uint32 = eqe->eqe_info; + type = eqe_info.type; + + switch (type) { + case GDMA_EQE_COMPLETION: + cq_id = eqe->details[0] & 0xFFFFFF; + if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs)) + break; + + cq = gc->cq_table[cq_id]; + if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id)) + break; + + if (cq->cq.callback) + cq->cq.callback(cq->cq.context, cq); + + break; + + case GDMA_EQE_TEST_EVENT: + gc->test_event_eq_id = eq->id; + complete(&gc->eq_test_event); + break; + + case GDMA_EQE_HWC_INIT_EQ_ID_DB: + case GDMA_EQE_HWC_INIT_DATA: + case GDMA_EQE_HWC_INIT_DONE: + if (!eq->eq.callback) + break; + + event.type = type; + memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE); + 
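/* Hand the HWC init event to the EQ callback registered at queue creation. */ +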
eq->eq.callback(eq->eq.context, eq, &event); + break; + + default: + break; + } +} + +static void mana_gd_process_eq_events(void *arg) +{ + u32 owner_bits, new_bits, old_bits; + union gdma_eqe_info eqe_info; + struct gdma_eqe *eq_eqe_ptr; + struct gdma_queue *eq = arg; + struct gdma_context *gc; + struct gdma_eqe *eqe; + unsigned int arm_bit; + u32 head, num_eqe; + int i; + + gc = eq->gdma_dev->gdma_context; + + num_eqe = eq->queue_size / GDMA_EQE_SIZE; + eq_eqe_ptr = eq->queue_mem_ptr; + + /* Process up to 5 EQEs at a time, and update the HW head. */ + for (i = 0; i < 5; i++) { + eqe = &eq_eqe_ptr[eq->head % num_eqe]; + eqe_info.as_uint32 = eqe->eqe_info; + owner_bits = eqe_info.owner_bits; + + old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK; + /* No more entries */ + if (owner_bits == old_bits) + break; + + new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK; + if (owner_bits != new_bits) { + dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id); + break; + } + + mana_gd_process_eqe(eq); + + eq->head++; + } + + /* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */ + if (mana_gd_is_hwc(eq->gdma_dev)) { + arm_bit = SET_ARM_BIT; + } else if (eq->eq.work_done < eq->eq.budget && + napi_complete_done(&eq->eq.napi, eq->eq.work_done)) { + arm_bit = SET_ARM_BIT; + } else { + arm_bit = 0; + } + + head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS); + + mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, + head, arm_bit); +} + +static int mana_poll(struct napi_struct *napi, int budget) +{ + struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi); + + eq->eq.work_done = 0; + eq->eq.budget = budget; + + mana_gd_process_eq_events(eq); + + return min(eq->eq.work_done, budget); +} + +static void mana_gd_schedule_napi(void *arg) +{ + struct gdma_queue *eq = arg; + struct napi_struct *napi; + + napi = &eq->eq.napi; + napi_schedule_irqoff(napi); +} + +static int mana_gd_register_irq(struct gdma_queue *queue, + const struct gdma_queue_spec *spec) +{ + struct gdma_dev *gd = queue->gdma_dev; + bool is_mana = mana_gd_is_mana(gd); + struct gdma_irq_context *gic; + struct gdma_context *gc; + struct gdma_resource *r; + unsigned int msi_index; + unsigned long flags; + int err; + + gc = gd->gdma_context; + r = &gc->msix_resource; + + spin_lock_irqsave(&r->lock, flags); + + msi_index = find_first_zero_bit(r->map, r->size); + if (msi_index >= r->size) { + err = -ENOSPC; + } else { + bitmap_set(r->map, msi_index, 1); + queue->eq.msix_index = msi_index; + err = 0; + } + + spin_unlock_irqrestore(&r->lock, flags); + + if (err) + return err; + + WARN_ON(msi_index >= gc->num_msix_usable); + + gic = &gc->irq_contexts[msi_index]; + + if (is_mana) { + netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll, + NAPI_POLL_WEIGHT); + napi_enable(&queue->eq.napi); + } + + WARN_ON(gic->handler || gic->arg); + + gic->arg = queue; + + if (is_mana) + gic->handler = mana_gd_schedule_napi; + else + gic->handler = mana_gd_process_eq_events; + + return 0; +} + +static void mana_gd_deregiser_irq(struct gdma_queue *queue) +{ + struct gdma_dev *gd = queue->gdma_dev; + struct gdma_irq_context *gic; + struct gdma_context *gc; + struct gdma_resource *r; + unsigned int msix_index; + unsigned long flags; + + gc = gd->gdma_context; + r = &gc->msix_resource; + + /* At most num_online_cpus() + 1 interrupts are used. 
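+ * The MSI-X index being released here was reserved in mana_gd_register_irq().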
*/ + msix_index = queue->eq.msix_index; + if (WARN_ON(msix_index >= gc->num_msix_usable)) + return; + + gic = &gc->irq_contexts[msix_index]; + gic->handler = NULL; + gic->arg = NULL; + + spin_lock_irqsave(&r->lock, flags); + bitmap_clear(r->map, msix_index, 1); + spin_unlock_irqrestore(&r->lock, flags); + + queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; +} + +int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq) +{ + struct gdma_generate_test_event_req req = {}; + struct gdma_general_resp resp = {}; + struct device *dev = gc->dev; + int err; + + mutex_lock(&gc->eq_test_event_mutex); + + init_completion(&gc->eq_test_event); + gc->test_event_eq_id = INVALID_QUEUE_ID; + + mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE, + sizeof(req), sizeof(resp)); + + req.hdr.dev_id = eq->gdma_dev->dev_id; + req.queue_index = eq->id; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err) { + dev_err(dev, "test_eq failed: %d\n", err); + goto out; + } + + err = -EPROTO; + + if (resp.hdr.status) { + dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status); + goto out; + } + + if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) { + dev_err(dev, "test_eq timed out on queue %d\n", eq->id); + goto out; + } + + if (eq->id != gc->test_event_eq_id) { + dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n", + gc->test_event_eq_id, eq->id); + goto out; + } + + err = 0; +out: + mutex_unlock(&gc->eq_test_event_mutex); + return err; +} + +static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets, + struct gdma_queue *queue) +{ + int err; + + if (flush_evenets) { + err = mana_gd_test_eq(gc, queue); + if (err) + dev_warn(gc->dev, "Failed to flush EQ: %d\n", err); + } + + mana_gd_deregiser_irq(queue); + + if (mana_gd_is_mana(queue->gdma_dev)) { + napi_disable(&queue->eq.napi); + netif_napi_del(&queue->eq.napi); + } + + if (queue->eq.disable_needed) + mana_gd_disable_queue(queue); +} + +static int mana_gd_create_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + bool create_hwq, struct gdma_queue *queue) +{ + struct gdma_context *gc = gd->gdma_context; + struct device *dev = gc->dev; + u32 log2_num_entries; + int err; + + queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; + + log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE); + + if (spec->eq.log2_throttle_limit > log2_num_entries) { + dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n", + spec->eq.log2_throttle_limit, log2_num_entries); + return -EINVAL; + } + + err = mana_gd_register_irq(queue, spec); + if (err) { + dev_err(dev, "Failed to register irq: %d\n", err); + return err; + } + + queue->eq.callback = spec->eq.callback; + queue->eq.context = spec->eq.context; + queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries); + queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1; + + if (create_hwq) { + err = mana_gd_create_hw_eq(gc, queue); + if (err) + goto out; + + err = mana_gd_test_eq(gc, queue); + if (err) + goto out; + } + + return 0; +out: + dev_err(dev, "Failed to create EQ: %d\n", err); + mana_gd_destroy_eq(gc, false, queue); + return err; +} + +static void mana_gd_create_cq(const struct gdma_queue_spec *spec, + struct gdma_queue *queue) +{ + u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE); + + queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries); + queue->cq.parent = spec->cq.parent_eq; + queue->cq.context = spec->cq.context; + queue->cq.callback = spec->cq.callback; +} + +static void mana_gd_destroy_cq(struct gdma_context 
*gc, + struct gdma_queue *queue) +{ + u32 id = queue->id; + + if (id >= gc->max_num_cqs) + return; + + if (!gc->cq_table[id]) + return; + + gc->cq_table[id] = NULL; +} + +int mana_gd_create_hwc_queue(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_mem_info *gmi; + struct gdma_queue *queue; + int err; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return -ENOMEM; + + gmi = &queue->mem_info; + err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); + if (err) + goto free_q; + + queue->head = 0; + queue->tail = 0; + queue->queue_mem_ptr = gmi->virt_addr; + queue->queue_size = spec->queue_size; + queue->monitor_avl_buf = spec->monitor_avl_buf; + queue->type = spec->type; + queue->gdma_dev = gd; + + if (spec->type == GDMA_EQ) + err = mana_gd_create_eq(gd, spec, false, queue); + else if (spec->type == GDMA_CQ) + mana_gd_create_cq(spec, queue); + + if (err) + goto out; + + *queue_ptr = queue; + return 0; +out: + mana_gd_free_memory(gmi); +free_q: + kfree(queue); + return err; +} + +static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region) +{ + struct gdma_destroy_dma_region_req req = {}; + struct gdma_general_resp resp = {}; + int err; + + if (gdma_region == GDMA_INVALID_DMA_REGION) + return; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req), + sizeof(resp)); + req.gdma_region = gdma_region; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) + dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n", + err, resp.hdr.status); +} + +static int mana_gd_create_dma_region(struct gdma_dev *gd, + struct gdma_mem_info *gmi) +{ + unsigned int num_page = gmi->length / PAGE_SIZE; + struct gdma_create_dma_region_req *req = NULL; + struct gdma_create_dma_region_resp resp = {}; + struct gdma_context *gc = gd->gdma_context; + struct hw_channel_context *hwc; + u32 length = gmi->length; + u32 req_msg_size; + int err; + int i; + + if (length < PAGE_SIZE || !is_power_of_2(length)) + return -EINVAL; + + if (offset_in_page(gmi->virt_addr) != 0) + return -EINVAL; + + hwc = gc->hwc.driver_data; + req_msg_size = sizeof(*req) + num_page * sizeof(u64); + if (req_msg_size > hwc->max_req_msg_size) + return -EINVAL; + + req = kzalloc(req_msg_size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, + req_msg_size, sizeof(resp)); + req->length = length; + req->offset_in_page = 0; + req->gdma_page_type = GDMA_PAGE_TYPE_4K; + req->page_count = num_page; + req->page_addr_list_len = num_page; + + for (i = 0; i < num_page; i++) + req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE; + + err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp); + if (err) + goto out; + + if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) { + dev_err(gc->dev, "Failed to create DMA region: 0x%x\n", + resp.hdr.status); + err = -EPROTO; + goto out; + } + + gmi->gdma_region = resp.gdma_region; +out: + kfree(req); + return err; +} + +int mana_gd_create_mana_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_mem_info *gmi; + struct gdma_queue *queue; + int err; + + if (spec->type != GDMA_EQ) + return -EINVAL; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return -ENOMEM; + + gmi = &queue->mem_info; + err = 
mana_gd_alloc_memory(gc, spec->queue_size, gmi); + if (err) + goto free_q; + + err = mana_gd_create_dma_region(gd, gmi); + if (err) + goto out; + + queue->head = 0; + queue->tail = 0; + queue->queue_mem_ptr = gmi->virt_addr; + queue->queue_size = spec->queue_size; + queue->monitor_avl_buf = spec->monitor_avl_buf; + queue->type = spec->type; + queue->gdma_dev = gd; + + err = mana_gd_create_eq(gd, spec, true, queue); + if (err) + goto out; + + *queue_ptr = queue; + return 0; +out: + mana_gd_free_memory(gmi); +free_q: + kfree(queue); + return err; +} + +int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_mem_info *gmi; + struct gdma_queue *queue; + int err; + + if (spec->type != GDMA_CQ && spec->type != GDMA_SQ && + spec->type != GDMA_RQ) + return -EINVAL; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return -ENOMEM; + + gmi = &queue->mem_info; + err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); + if (err) + goto free_q; + + err = mana_gd_create_dma_region(gd, gmi); + if (err) + goto out; + + queue->head = 0; + queue->tail = 0; + queue->queue_mem_ptr = gmi->virt_addr; + queue->queue_size = spec->queue_size; + queue->monitor_avl_buf = spec->monitor_avl_buf; + queue->type = spec->type; + queue->gdma_dev = gd; + + if (spec->type == GDMA_CQ) + mana_gd_create_cq(spec, queue); + + *queue_ptr = queue; + return 0; +out: + mana_gd_free_memory(gmi); +free_q: + kfree(queue); + return err; +} + +void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue) +{ + struct gdma_mem_info *gmi = &queue->mem_info; + + switch (queue->type) { + case GDMA_EQ: + mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue); + break; + + case GDMA_CQ: + mana_gd_destroy_cq(gc, queue); + break; + + case GDMA_RQ: + break; + + case GDMA_SQ: + break; + + default: + dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n", + queue->type); + return; + } + + mana_gd_destroy_dma_region(gc, gmi->gdma_region); + mana_gd_free_memory(gmi); + kfree(queue); +} + +int mana_gd_verify_vf_version(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_verify_ver_resp resp = {}; + struct gdma_verify_ver_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION, + sizeof(req), sizeof(resp)); + + req.protocol_ver_min = GDMA_PROTOCOL_FIRST; + req.protocol_ver_max = GDMA_PROTOCOL_LAST; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n", + err, resp.hdr.status); + return err ? err : -EPROTO; + } + + return 0; +} + +int mana_gd_register_device(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_register_device_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + gd->pdid = INVALID_PDID; + gd->doorbell = INVALID_DOORBELL; + gd->gpa_mkey = INVALID_MEM_KEY; + + mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req), + sizeof(resp)); + + req.hdr.dev_id = gd->dev_id; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n", + err, resp.hdr.status); + return err ? 
err : -EPROTO; + } + + gd->pdid = resp.pdid; + gd->gpa_mkey = resp.gpa_mkey; + gd->doorbell = resp.db_id; + + return 0; +} + +int mana_gd_deregister_device(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_general_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + if (gd->pdid == INVALID_PDID) + return -EINVAL; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req), + sizeof(resp)); + + req.hdr.dev_id = gd->dev_id; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n", + err, resp.hdr.status); + if (!err) + err = -EPROTO; + } + + gd->pdid = INVALID_PDID; + gd->doorbell = INVALID_DOORBELL; + gd->gpa_mkey = INVALID_MEM_KEY; + + return err; +} + +u32 mana_gd_wq_avail_space(struct gdma_queue *wq) +{ + u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE; + u32 wq_size = wq->queue_size; + + WARN_ON_ONCE(used_space > wq_size); + + return wq_size - used_space; +} + +u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset) +{ + u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1); + + WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size); + + return wq->queue_mem_ptr + offset; +} + +static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req, + enum gdma_queue_type q_type, + u32 client_oob_size, u32 sgl_data_size, + u8 *wqe_ptr) +{ + bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL); + bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0); + struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr; + u8 *ptr; + + memset(header, 0, sizeof(struct gdma_wqe)); + header->num_sge = wqe_req->num_sge; + header->inline_oob_size_div4 = client_oob_size / sizeof(u32); + + if (oob_in_sgl) { + WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2); + + header->client_oob_in_sgl = 1; + + if (pad_data) + header->last_vbytes = wqe_req->sgl[0].size; + } + + if (q_type == GDMA_SQ) + header->client_data_unit = wqe_req->client_data_unit; + + /* The size of gdma_wqe + client_oob_size must be less than or equal + * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond + * the queue memory buffer boundary. 
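+ * The inline client OOB data, if any, is copied right after this header and + * zero-padded out to client_oob_size.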
+ */ + ptr = wqe_ptr + sizeof(header); + + if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) { + memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size); + + if (client_oob_size > wqe_req->inline_oob_size) + memset(ptr + wqe_req->inline_oob_size, 0, + client_oob_size - wqe_req->inline_oob_size); + } + + return sizeof(header) + client_oob_size; +} + +static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr, + const struct gdma_wqe_request *wqe_req) +{ + u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge; + const u8 *address = (u8 *)wqe_req->sgl; + u8 *base_ptr, *end_ptr; + u32 size_to_end; + + base_ptr = wq->queue_mem_ptr; + end_ptr = base_ptr + wq->queue_size; + size_to_end = (u32)(end_ptr - wqe_ptr); + + if (size_to_end < sgl_size) { + memcpy(wqe_ptr, address, size_to_end); + + wqe_ptr = base_ptr; + address += size_to_end; + sgl_size -= size_to_end; + } + + memcpy(wqe_ptr, address, sgl_size); +} + +int mana_gd_post_work_request(struct gdma_queue *wq, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info) +{ + u32 client_oob_size = wqe_req->inline_oob_size; + struct gdma_context *gc; + u32 sgl_data_size; + u32 max_wqe_size; + u32 wqe_size; + u8 *wqe_ptr; + + if (wqe_req->num_sge == 0) + return -EINVAL; + + if (wq->type == GDMA_RQ) { + if (client_oob_size != 0) + return -EINVAL; + + client_oob_size = INLINE_OOB_SMALL_SIZE; + + max_wqe_size = GDMA_MAX_RQE_SIZE; + } else { + if (client_oob_size != INLINE_OOB_SMALL_SIZE && + client_oob_size != INLINE_OOB_LARGE_SIZE) + return -EINVAL; + + max_wqe_size = GDMA_MAX_SQE_SIZE; + } + + sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge; + wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size + + sgl_data_size, GDMA_WQE_BU_SIZE); + if (wqe_size > max_wqe_size) + return -EINVAL; + + if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) { + gc = wq->gdma_dev->gdma_context; + dev_err(gc->dev, "unsuccessful flow control!\n"); + return -ENOSPC; + } + + if (wqe_info) + wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE; + + wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head); + wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size, + sgl_data_size, wqe_ptr); + if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size) + wqe_ptr -= wq->queue_size; + + mana_gd_write_sgl(wq, wqe_ptr, wqe_req); + + wq->head += wqe_size / GDMA_WQE_BU_SIZE; + + return 0; +} + +int mana_gd_post_and_ring(struct gdma_queue *queue, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info) +{ + struct gdma_context *gc = queue->gdma_dev->gdma_context; + int err; + + err = mana_gd_post_work_request(queue, wqe_req, wqe_info); + if (err) + return err; + + mana_gd_wq_ring_doorbell(gc, queue); + + return 0; +} + +static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) +{ + unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe); + struct gdma_cqe *cq_cqe = cq->queue_mem_ptr; + u32 owner_bits, new_bits, old_bits; + struct gdma_cqe *cqe; + + cqe = &cq_cqe[cq->head % num_cqe]; + owner_bits = cqe->cqe_info.owner_bits; + + old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK; + /* Return 0 if no more entries. */ + if (owner_bits == old_bits) + return 0; + + new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK; + /* Return -1 if overflow detected. 
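+ * The owner bits advance by one each time the queue wraps, so a value other + * than the previous or current generation means unread CQEs were overwritten.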
*/ + if (owner_bits != new_bits) + return -1; + + comp->wq_num = cqe->cqe_info.wq_num; + comp->is_sq = cqe->cqe_info.is_sq; + memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE); + + return 1; +} + +int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe) +{ + int cqe_idx; + int ret; + + for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) { + ret = mana_gd_read_cqe(cq, &comp[cqe_idx]); + + if (ret < 0) { + cq->head -= cqe_idx; + return ret; + } + + if (ret == 0) + break; + + cq->head++; + } + + return cqe_idx; +} + +static irqreturn_t mana_gd_intr(int irq, void *arg) +{ + struct gdma_irq_context *gic = arg; + + if (gic->handler) + gic->handler(gic->arg); + + return IRQ_HANDLED; +} + +int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r) +{ + r->map = bitmap_zalloc(res_avail, GFP_KERNEL); + if (!r->map) + return -ENOMEM; + + r->size = res_avail; + spin_lock_init(&r->lock); + + return 0; +} + +void mana_gd_free_res_map(struct gdma_resource *r) +{ + bitmap_free(r->map); + r->map = NULL; + r->size = 0; +} + +static int mana_gd_setup_irqs(struct pci_dev *pdev) +{ + unsigned int max_queues_per_port = num_online_cpus(); + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_irq_context *gic; + unsigned int max_irqs; + int nvec, irq; + int err, i, j; + + if (max_queues_per_port > MANA_MAX_NUM_QUEUES) + max_queues_per_port = MANA_MAX_NUM_QUEUES; + + max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV; + + /* Need 1 interrupt for the Hardware communication Channel (HWC) */ + max_irqs++; + + nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); + if (nvec < 0) + return nvec; + + gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context), + GFP_KERNEL); + if (!gc->irq_contexts) { + err = -ENOMEM; + goto free_irq_vector; + } + + for (i = 0; i < nvec; i++) { + gic = &gc->irq_contexts[i]; + gic->handler = NULL; + gic->arg = NULL; + + irq = pci_irq_vector(pdev, i); + if (irq < 0) { + err = irq; + goto free_irq; + } + + err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic); + if (err) + goto free_irq; + } + + err = mana_gd_alloc_res_map(nvec, &gc->msix_resource); + if (err) + goto free_irq; + + gc->max_num_msix = nvec; + gc->num_msix_usable = nvec; + + return 0; + +free_irq: + for (j = i - 1; j >= 0; j--) { + irq = pci_irq_vector(pdev, j); + gic = &gc->irq_contexts[j]; + free_irq(irq, gic); + } + + kfree(gc->irq_contexts); + gc->irq_contexts = NULL; +free_irq_vector: + pci_free_irq_vectors(pdev); + return err; +} + +static void mana_gd_remove_irqs(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_irq_context *gic; + int irq, i; + + if (gc->max_num_msix < 1) + return; + + mana_gd_free_res_map(&gc->msix_resource); + + for (i = 0; i < gc->max_num_msix; i++) { + irq = pci_irq_vector(pdev, i); + if (irq < 0) + continue; + + gic = &gc->irq_contexts[i]; + free_irq(irq, gic); + } + + pci_free_irq_vectors(pdev); + + gc->max_num_msix = 0; + gc->num_msix_usable = 0; + kfree(gc->irq_contexts); + gc->irq_contexts = NULL; +} + +static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct gdma_context *gc; + void __iomem *bar0_va; + int bar = 0; + int err; + + err = pci_enable_device(pdev); + if (err) + return -ENXIO; + + pci_set_master(pdev); + + err = pci_request_regions(pdev, "mana"); + if (err) + goto disable_dev; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + goto release_region; + + err = -ENOMEM; + gc = vzalloc(sizeof(*gc)); + if (!gc) + goto 
release_region; + + bar0_va = pci_iomap(pdev, bar, 0); + if (!bar0_va) + goto free_gc; + + gc->bar0_va = bar0_va; + gc->dev = &pdev->dev; + + pci_set_drvdata(pdev, gc); + + mana_gd_init_registers(pdev); + + mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); + + err = mana_gd_setup_irqs(pdev); + if (err) + goto unmap_bar; + + mutex_init(&gc->eq_test_event_mutex); + + err = mana_hwc_create_channel(gc); + if (err) + goto remove_irq; + + err = mana_gd_verify_vf_version(pdev); + if (err) + goto remove_irq; + + err = mana_gd_query_max_resources(pdev); + if (err) + goto remove_irq; + + err = mana_gd_detect_devices(pdev); + if (err) + goto remove_irq; + + err = mana_probe(&gc->mana); + if (err) + goto clean_up_gdma; + + return 0; + +clean_up_gdma: + mana_hwc_destroy_channel(gc); + vfree(gc->cq_table); + gc->cq_table = NULL; +remove_irq: + mana_gd_remove_irqs(pdev); +unmap_bar: + pci_iounmap(pdev, bar0_va); +free_gc: + vfree(gc); +release_region: + pci_release_regions(pdev); +disable_dev: + pci_clear_master(pdev); + pci_disable_device(pdev); + dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err); + return err; +} + +static void mana_gd_remove(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + mana_remove(&gc->mana); + + mana_hwc_destroy_channel(gc); + vfree(gc->cq_table); + gc->cq_table = NULL; + + mana_gd_remove_irqs(pdev); + + pci_iounmap(pdev, gc->bar0_va); + + vfree(gc); + + pci_release_regions(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +#ifndef PCI_VENDOR_ID_MICROSOFT +#define PCI_VENDOR_ID_MICROSOFT 0x1414 +#endif + +static const struct pci_device_id mana_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) }, + { } +}; + +static struct pci_driver mana_driver = { + .name = "mana", + .id_table = mana_id_table, + .probe = mana_gd_probe, + .remove = mana_gd_remove, +}; + +module_pci_driver(mana_driver); + +MODULE_DEVICE_TABLE(pci, mana_id_table); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver"); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c new file mode 100644 index 000000000000..1a923fd99990 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. 
*/ + +#include "gdma.h" +#include "hw_channel.h" + +static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id) +{ + struct gdma_resource *r = &hwc->inflight_msg_res; + unsigned long flags; + u32 index; + + down(&hwc->sema); + + spin_lock_irqsave(&r->lock, flags); + + index = find_first_zero_bit(hwc->inflight_msg_res.map, + hwc->inflight_msg_res.size); + + bitmap_set(hwc->inflight_msg_res.map, index, 1); + + spin_unlock_irqrestore(&r->lock, flags); + + *msg_id = index; + + return 0; +} + +static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id) +{ + struct gdma_resource *r = &hwc->inflight_msg_res; + unsigned long flags; + + spin_lock_irqsave(&r->lock, flags); + bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1); + spin_unlock_irqrestore(&r->lock, flags); + + up(&hwc->sema); +} + +static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx, + const struct gdma_resp_hdr *resp_msg, + u32 resp_len) +{ + if (resp_len < sizeof(*resp_msg)) + return -EPROTO; + + if (resp_len > caller_ctx->output_buflen) + return -EPROTO; + + return 0; +} + +static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, + const struct gdma_resp_hdr *resp_msg) +{ + struct hwc_caller_ctx *ctx; + int err; + + if (!test_bit(resp_msg->response.hwc_msg_id, + hwc->inflight_msg_res.map)) { + dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n", + resp_msg->response.hwc_msg_id); + return; + } + + ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id; + err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len); + if (err) + goto out; + + ctx->status_code = resp_msg->status; + + memcpy(ctx->output_buf, resp_msg, resp_len); +out: + ctx->error = err; + complete(&ctx->comp_event); +} + +static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq, + struct hwc_work_request *req) +{ + struct device *dev = hwc_rxq->hwc->dev; + struct gdma_sge *sge; + int err; + + sge = &req->sge; + sge->address = (u64)req->buf_sge_addr; + sge->mem_key = hwc_rxq->msg_buf->gpa_mkey; + sge->size = req->buf_len; + + memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request)); + req->wqe_req.sgl = sge; + req->wqe_req.num_sge = 1; + req->wqe_req.client_data_unit = 0; + + err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL); + if (err) + dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err); + return err; +} + +static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, + struct gdma_event *event) +{ + struct hw_channel_context *hwc = ctx; + struct gdma_dev *gd = hwc->gdma_dev; + union hwc_init_type_data type_data; + union hwc_init_eq_id_db eq_db; + u32 type, val; + + switch (event->type) { + case GDMA_EQE_HWC_INIT_EQ_ID_DB: + eq_db.as_uint32 = event->details[0]; + hwc->cq->gdma_eq->id = eq_db.eq_id; + gd->doorbell = eq_db.doorbell; + break; + + case GDMA_EQE_HWC_INIT_DATA: + type_data.as_uint32 = event->details[0]; + type = type_data.type; + val = type_data.value; + + switch (type) { + case HWC_INIT_DATA_CQID: + hwc->cq->gdma_cq->id = val; + break; + + case HWC_INIT_DATA_RQID: + hwc->rxq->gdma_wq->id = val; + break; + + case HWC_INIT_DATA_SQID: + hwc->txq->gdma_wq->id = val; + break; + + case HWC_INIT_DATA_QUEUE_DEPTH: + hwc->hwc_init_q_depth_max = (u16)val; + break; + + case HWC_INIT_DATA_MAX_REQUEST: + hwc->hwc_init_max_req_msg_size = val; + break; + + case HWC_INIT_DATA_MAX_RESPONSE: + hwc->hwc_init_max_resp_msg_size = val; + break; + + case HWC_INIT_DATA_MAX_NUM_CQS: + gd->gdma_context->max_num_cqs = val; + break; + + case HWC_INIT_DATA_PDID: + 
hwc->gdma_dev->pdid = val; + break; + + case HWC_INIT_DATA_GPA_MKEY: + hwc->rxq->msg_buf->gpa_mkey = val; + hwc->txq->msg_buf->gpa_mkey = val; + break; + } + + break; + + case GDMA_EQE_HWC_INIT_DONE: + complete(&hwc->hwc_init_eqe_comp); + break; + + default: + /* Ignore unknown events, which should never happen. */ + break; + } +} + +static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id, + const struct hwc_rx_oob *rx_oob) +{ + struct hw_channel_context *hwc = ctx; + struct hwc_wq *hwc_rxq = hwc->rxq; + struct hwc_work_request *rx_req; + struct gdma_resp_hdr *resp; + struct gdma_wqe *dma_oob; + struct gdma_queue *rq; + struct gdma_sge *sge; + u64 rq_base_addr; + u64 rx_req_idx; + u8 *wqe; + + if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id)) + return; + + rq = hwc_rxq->gdma_wq; + wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE); + dma_oob = (struct gdma_wqe *)wqe; + + sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4); + + /* Select the RX work request for virtual address and for reposting. */ + rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle; + rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size; + + rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx]; + resp = (struct gdma_resp_hdr *)rx_req->buf_va; + + if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) { + dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n", + resp->response.hwc_msg_id); + return; + } + + mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp); + + /* Do no longer use 'resp', because the buffer is posted to the HW + * in the below mana_hwc_post_rx_wqe(). + */ + resp = NULL; + + mana_hwc_post_rx_wqe(hwc_rxq, rx_req); +} + +static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id, + const struct hwc_rx_oob *rx_oob) +{ + struct hw_channel_context *hwc = ctx; + struct hwc_wq *hwc_txq = hwc->txq; + + WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id); +} + +static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc, + enum gdma_queue_type type, u64 queue_size, + struct gdma_queue **queue) +{ + struct gdma_queue_spec spec = {}; + + if (type != GDMA_SQ && type != GDMA_RQ) + return -EINVAL; + + spec.type = type; + spec.monitor_avl_buf = false; + spec.queue_size = queue_size; + + return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue); +} + +static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc, + u64 queue_size, + void *ctx, gdma_cq_callback *cb, + struct gdma_queue *parent_eq, + struct gdma_queue **queue) +{ + struct gdma_queue_spec spec = {}; + + spec.type = GDMA_CQ; + spec.monitor_avl_buf = false; + spec.queue_size = queue_size; + spec.cq.context = ctx; + spec.cq.callback = cb; + spec.cq.parent_eq = parent_eq; + + return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue); +} + +static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc, + u64 queue_size, + void *ctx, gdma_eq_callback *cb, + struct gdma_queue **queue) +{ + struct gdma_queue_spec spec = {}; + + spec.type = GDMA_EQ; + spec.monitor_avl_buf = false; + spec.queue_size = queue_size; + spec.eq.context = ctx; + spec.eq.callback = cb; + spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ; + + return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue); +} + +static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self) +{ + struct hwc_rx_oob comp_data = {}; + struct gdma_comp *completions; + struct hwc_cq *hwc_cq = ctx; + int comp_read, i; + + WARN_ON_ONCE(hwc_cq->gdma_cq != q_self); + + completions = hwc_cq->comp_buf; + 
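/* Poll up to queue_depth completions and dispatch each one to the SQ or RQ event handler. */ +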
comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth); + WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth); + + for (i = 0; i < comp_read; ++i) { + comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data; + + if (completions[i].is_sq) + hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx, + completions[i].wq_num, + &comp_data); + else + hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx, + completions[i].wq_num, + &comp_data); + } + + mana_gd_arm_cq(q_self); +} + +static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq) +{ + if (!hwc_cq) + return; + + kfree(hwc_cq->comp_buf); + + if (hwc_cq->gdma_cq) + mana_gd_destroy_queue(gc, hwc_cq->gdma_cq); + + if (hwc_cq->gdma_eq) + mana_gd_destroy_queue(gc, hwc_cq->gdma_eq); + + kfree(hwc_cq); +} + +static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, + gdma_eq_callback *callback, void *ctx, + hwc_rx_event_handler_t *rx_ev_hdlr, + void *rx_ev_ctx, + hwc_tx_event_handler_t *tx_ev_hdlr, + void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr) +{ + struct gdma_queue *eq, *cq; + struct gdma_comp *comp_buf; + struct hwc_cq *hwc_cq; + u32 eq_size, cq_size; + int err; + + eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth); + if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE) + eq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + + cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth); + if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE) + cq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + + hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL); + if (!hwc_cq) + return -ENOMEM; + + err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err); + goto out; + } + hwc_cq->gdma_eq = eq; + + err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event, + eq, &cq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err); + goto out; + } + hwc_cq->gdma_cq = cq; + + comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL); + if (!comp_buf) { + err = -ENOMEM; + goto out; + } + + hwc_cq->hwc = hwc; + hwc_cq->comp_buf = comp_buf; + hwc_cq->queue_depth = q_depth; + hwc_cq->rx_event_handler = rx_ev_hdlr; + hwc_cq->rx_event_ctx = rx_ev_ctx; + hwc_cq->tx_event_handler = tx_ev_hdlr; + hwc_cq->tx_event_ctx = tx_ev_ctx; + + *hwc_cq_ptr = hwc_cq; + return 0; +out: + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq); + return err; +} + +static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, + u32 max_msg_size, + struct hwc_dma_buf **dma_buf_ptr) +{ + struct gdma_context *gc = hwc->gdma_dev->gdma_context; + struct hwc_work_request *hwc_wr; + struct hwc_dma_buf *dma_buf; + struct gdma_mem_info *gmi; + void *virt_addr; + u32 buf_size; + u8 *base_pa; + int err; + u16 i; + + dma_buf = kzalloc(sizeof(*dma_buf) + + q_depth * sizeof(struct hwc_work_request), + GFP_KERNEL); + if (!dma_buf) + return -ENOMEM; + + dma_buf->num_reqs = q_depth; + + buf_size = PAGE_ALIGN(q_depth * max_msg_size); + + gmi = &dma_buf->mem_info; + err = mana_gd_alloc_memory(gc, buf_size, gmi); + if (err) { + dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err); + goto out; + } + + virt_addr = dma_buf->mem_info.virt_addr; + base_pa = (u8 *)dma_buf->mem_info.dma_handle; + + for (i = 0; i < q_depth; i++) { + hwc_wr = &dma_buf->reqs[i]; + + hwc_wr->buf_va = virt_addr + i * max_msg_size; + hwc_wr->buf_sge_addr = base_pa + i * max_msg_size; + + hwc_wr->buf_len = max_msg_size; + } + + *dma_buf_ptr = dma_buf; + return 0; +out: + kfree(dma_buf); + 
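/* This path is reached only when mana_gd_alloc_memory() fails, so gmi needs no cleanup. */ +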
return err; +} + +static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc, + struct hwc_dma_buf *dma_buf) +{ + if (!dma_buf) + return; + + mana_gd_free_memory(&dma_buf->mem_info); + + kfree(dma_buf); +} + +static void mana_hwc_destroy_wq(struct hw_channel_context *hwc, + struct hwc_wq *hwc_wq) +{ + if (!hwc_wq) + return; + + mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf); + + if (hwc_wq->gdma_wq) + mana_gd_destroy_queue(hwc->gdma_dev->gdma_context, + hwc_wq->gdma_wq); + + kfree(hwc_wq); +} + +static int mana_hwc_create_wq(struct hw_channel_context *hwc, + enum gdma_queue_type q_type, u16 q_depth, + u32 max_msg_size, struct hwc_cq *hwc_cq, + struct hwc_wq **hwc_wq_ptr) +{ + struct gdma_queue *queue; + struct hwc_wq *hwc_wq; + u32 queue_size; + int err; + + WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ); + + if (q_type == GDMA_RQ) + queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth); + else + queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth); + + if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE) + queue_size = MINIMUM_SUPPORTED_PAGE_SIZE; + + hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL); + if (!hwc_wq) + return -ENOMEM; + + err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue); + if (err) + goto out; + + err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, + &hwc_wq->msg_buf); + if (err) + goto out; + + hwc_wq->hwc = hwc; + hwc_wq->gdma_wq = queue; + hwc_wq->queue_depth = q_depth; + hwc_wq->hwc_cq = hwc_cq; + + *hwc_wq_ptr = hwc_wq; + return 0; +out: + if (err) + mana_hwc_destroy_wq(hwc, hwc_wq); + return err; +} + +static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq, + struct hwc_work_request *req, + u32 dest_virt_rq_id, u32 dest_virt_rcq_id, + bool dest_pf) +{ + struct device *dev = hwc_txq->hwc->dev; + struct hwc_tx_oob *tx_oob; + struct gdma_sge *sge; + int err; + + if (req->msg_size == 0 || req->msg_size > req->buf_len) { + dev_err(dev, "wrong msg_size: %u, buf_len: %u\n", + req->msg_size, req->buf_len); + return -EINVAL; + } + + tx_oob = &req->tx_oob; + + tx_oob->vrq_id = dest_virt_rq_id; + tx_oob->dest_vfid = 0; + tx_oob->vrcq_id = dest_virt_rcq_id; + tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id; + tx_oob->loopback = false; + tx_oob->lso_override = false; + tx_oob->dest_pf = dest_pf; + tx_oob->vsq_id = hwc_txq->gdma_wq->id; + + sge = &req->sge; + sge->address = (u64)req->buf_sge_addr; + sge->mem_key = hwc_txq->msg_buf->gpa_mkey; + sge->size = req->msg_size; + + memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request)); + req->wqe_req.sgl = sge; + req->wqe_req.num_sge = 1; + req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob); + req->wqe_req.inline_oob_data = tx_oob; + req->wqe_req.client_data_unit = 0; + + err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL); + if (err) + dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err); + return err; +} + +static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc, + u16 num_msg) +{ + int err; + + sema_init(&hwc->sema, num_msg); + + err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res); + if (err) + dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err); + return err; +} + +static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth, + u32 max_req_msg_size, u32 max_resp_msg_size) +{ + struct gdma_context *gc = hwc->gdma_dev->gdma_context; + struct hwc_wq *hwc_rxq = hwc->rxq; + struct hwc_work_request *req; + struct hwc_caller_ctx *ctx; + int err; + int i; + + /* Post all WQEs on the RQ */ + for (i = 0; i < q_depth; i++) { + req 
= &hwc_rxq->msg_buf->reqs[i]; + err = mana_hwc_post_rx_wqe(hwc_rxq, req); + if (err) + return err; + } + + ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + for (i = 0; i < q_depth; ++i) + init_completion(&ctx[i].comp_event); + + hwc->caller_ctx = ctx; + + return mana_gd_test_eq(gc, hwc->cq->gdma_eq); +} + +static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth, + u32 *max_req_msg_size, + u32 *max_resp_msg_size) +{ + struct hw_channel_context *hwc = gc->hwc.driver_data; + struct gdma_queue *rq = hwc->rxq->gdma_wq; + struct gdma_queue *sq = hwc->txq->gdma_wq; + struct gdma_queue *eq = hwc->cq->gdma_eq; + struct gdma_queue *cq = hwc->cq->gdma_cq; + int err; + + init_completion(&hwc->hwc_init_eqe_comp); + + err = mana_smc_setup_hwc(&gc->shm_channel, false, + eq->mem_info.dma_handle, + cq->mem_info.dma_handle, + rq->mem_info.dma_handle, + sq->mem_info.dma_handle, + eq->eq.msix_index); + if (err) + return err; + + if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ)) + return -ETIMEDOUT; + + *q_depth = hwc->hwc_init_q_depth_max; + *max_req_msg_size = hwc->hwc_init_max_req_msg_size; + *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size; + + if (WARN_ON(cq->id >= gc->max_num_cqs)) + return -EPROTO; + + gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *)); + if (!gc->cq_table) + return -ENOMEM; + + gc->cq_table[cq->id] = cq; + + return 0; +} + +static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth, + u32 max_req_msg_size, u32 max_resp_msg_size) +{ + struct hwc_wq *hwc_rxq = NULL; + struct hwc_wq *hwc_txq = NULL; + struct hwc_cq *hwc_cq = NULL; + int err; + + err = mana_hwc_init_inflight_msg(hwc, q_depth); + if (err) + return err; + + /* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ + * queue depth and RQ queue depth. + */ + err = mana_hwc_create_cq(hwc, q_depth * 2, + mana_hwc_init_event_handler, hwc, + mana_hwc_rx_event_handler, hwc, + mana_hwc_tx_event_handler, hwc, &hwc_cq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err); + goto out; + } + hwc->cq = hwc_cq; + + err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size, + hwc_cq, &hwc_rxq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err); + goto out; + } + hwc->rxq = hwc_rxq; + + err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size, + hwc_cq, &hwc_txq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err); + goto out; + } + hwc->txq = hwc_txq; + + hwc->num_inflight_msg = q_depth; + hwc->max_req_msg_size = max_req_msg_size; + + return 0; +out: + if (hwc_txq) + mana_hwc_destroy_wq(hwc, hwc_txq); + + if (hwc_rxq) + mana_hwc_destroy_wq(hwc, hwc_rxq); + + if (hwc_cq) + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq); + + mana_gd_free_res_map(&hwc->inflight_msg_res); + return err; +} + +int mana_hwc_create_channel(struct gdma_context *gc) +{ + u32 max_req_msg_size, max_resp_msg_size; + struct gdma_dev *gd = &gc->hwc; + struct hw_channel_context *hwc; + u16 q_depth_max; + int err; + + hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); + if (!hwc) + return -ENOMEM; + + gd->gdma_context = gc; + gd->driver_data = hwc; + hwc->gdma_dev = gd; + hwc->dev = gc->dev; + + /* HWC's instance number is always 0. 
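+ * dev_id is zeroed below and only its type field is set, leaving instance 0.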
*/ + gd->dev_id.as_uint32 = 0; + gd->dev_id.type = GDMA_DEVICE_HWC; + + gd->pdid = INVALID_PDID; + gd->doorbell = INVALID_DOORBELL; + + err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH, + HW_CHANNEL_MAX_REQUEST_SIZE, + HW_CHANNEL_MAX_RESPONSE_SIZE); + if (err) { + dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err); + goto out; + } + + err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size, + &max_resp_msg_size); + if (err) { + dev_err(hwc->dev, "Failed to establish HWC: %d\n", err); + goto out; + } + + err = mana_hwc_test_channel(gc->hwc.driver_data, + HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH, + max_req_msg_size, max_resp_msg_size); + if (err) { + dev_err(hwc->dev, "Failed to test HWC: %d\n", err); + goto out; + } + + return 0; +out: + kfree(hwc); + return err; +} + +void mana_hwc_destroy_channel(struct gdma_context *gc) +{ + struct hw_channel_context *hwc = gc->hwc.driver_data; + struct hwc_caller_ctx *ctx; + + mana_smc_teardown_hwc(&gc->shm_channel, false); + + ctx = hwc->caller_ctx; + kfree(ctx); + hwc->caller_ctx = NULL; + + mana_hwc_destroy_wq(hwc, hwc->txq); + hwc->txq = NULL; + + mana_hwc_destroy_wq(hwc, hwc->rxq); + hwc->rxq = NULL; + + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); + hwc->cq = NULL; + + mana_gd_free_res_map(&hwc->inflight_msg_res); + + hwc->num_inflight_msg = 0; + + if (hwc->gdma_dev->pdid != INVALID_PDID) { + hwc->gdma_dev->doorbell = INVALID_DOORBELL; + hwc->gdma_dev->pdid = INVALID_PDID; + } + + kfree(hwc); + gc->hwc.driver_data = NULL; + gc->hwc.gdma_context = NULL; +} + +int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, + const void *req, u32 resp_len, void *resp) +{ + struct hwc_work_request *tx_wr; + struct hwc_wq *txq = hwc->txq; + struct gdma_req_hdr *req_msg; + struct hwc_caller_ctx *ctx; + u16 msg_id; + int err; + + mana_hwc_get_msg_index(hwc, &msg_id); + + tx_wr = &txq->msg_buf->reqs[msg_id]; + + if (req_len > tx_wr->buf_len) { + dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len, + tx_wr->buf_len); + err = -EINVAL; + goto out; + } + + ctx = hwc->caller_ctx + msg_id; + ctx->output_buf = resp; + ctx->output_buflen = resp_len; + + req_msg = (struct gdma_req_hdr *)tx_wr->buf_va; + if (req) + memcpy(req_msg, req, req_len); + + req_msg->req.hwc_msg_id = msg_id; + + tx_wr->msg_size = req_len; + + err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false); + if (err) { + dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err); + goto out; + } + + if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) { + dev_err(hwc->dev, "HWC: Request timed out!\n"); + err = -ETIMEDOUT; + goto out; + } + + if (ctx->error) { + err = ctx->error; + goto out; + } + + if (ctx->status_code) { + dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n", + ctx->status_code); + err = -EPROTO; + goto out; + } +out: + mana_hwc_put_msg_index(hwc, msg_id); + return err; +} diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h new file mode 100644 index 000000000000..31c6e83c454a --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. 
*/ + +#ifndef _HW_CHANNEL_H +#define _HW_CHANNEL_H + +#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4 + +#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000 +#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000 + +#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1 + +#define HWC_INIT_DATA_CQID 1 +#define HWC_INIT_DATA_RQID 2 +#define HWC_INIT_DATA_SQID 3 +#define HWC_INIT_DATA_QUEUE_DEPTH 4 +#define HWC_INIT_DATA_MAX_REQUEST 5 +#define HWC_INIT_DATA_MAX_RESPONSE 6 +#define HWC_INIT_DATA_MAX_NUM_CQS 7 +#define HWC_INIT_DATA_PDID 8 +#define HWC_INIT_DATA_GPA_MKEY 9 + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +union hwc_init_eq_id_db { + u32 as_uint32; + + struct { + u32 eq_id : 16; + u32 doorbell : 16; + }; +}; /* HW DATA */ + +union hwc_init_type_data { + u32 as_uint32; + + struct { + u32 value : 24; + u32 type : 8; + }; +}; /* HW DATA */ + +struct hwc_rx_oob { + u32 type : 6; + u32 eom : 1; + u32 som : 1; + u32 vendor_err : 8; + u32 reserved1 : 16; + + u32 src_virt_wq : 24; + u32 src_vfid : 8; + + u32 reserved2; + + union { + u32 wqe_addr_low; + u32 wqe_offset; + }; + + u32 wqe_addr_high; + + u32 client_data_unit : 14; + u32 reserved3 : 18; + + u32 tx_oob_data_size; + + u32 chunk_offset : 21; + u32 reserved4 : 11; +}; /* HW DATA */ + +struct hwc_tx_oob { + u32 reserved1; + + u32 reserved2; + + u32 vrq_id : 24; + u32 dest_vfid : 8; + + u32 vrcq_id : 24; + u32 reserved3 : 8; + + u32 vscq_id : 24; + u32 loopback : 1; + u32 lso_override: 1; + u32 dest_pf : 1; + u32 reserved4 : 5; + + u32 vsq_id : 24; + u32 reserved5 : 8; +}; /* HW DATA */ + +struct hwc_work_request { + void *buf_va; + void *buf_sge_addr; + u32 buf_len; + u32 msg_size; + + struct gdma_wqe_request wqe_req; + struct hwc_tx_oob tx_oob; + + struct gdma_sge sge; +}; + +/* hwc_dma_buf represents the array of in-flight WQEs. + * mem_info as know as the GDMA mapped memory is partitioned and used by + * in-flight WQEs. + * The number of WQEs is determined by the number of in-flight messages. 
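+ * Each hwc_work_request in reqs[] owns one max_msg_size slice of mem_info.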
+ */ +struct hwc_dma_buf { + struct gdma_mem_info mem_info; + + u32 gpa_mkey; + + u32 num_reqs; + struct hwc_work_request reqs[]; +}; + +typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, + const struct hwc_rx_oob *rx_oob); + +typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id, + const struct hwc_rx_oob *rx_oob); + +struct hwc_cq { + struct hw_channel_context *hwc; + + struct gdma_queue *gdma_cq; + struct gdma_queue *gdma_eq; + struct gdma_comp *comp_buf; + u16 queue_depth; + + hwc_rx_event_handler_t *rx_event_handler; + void *rx_event_ctx; + + hwc_tx_event_handler_t *tx_event_handler; + void *tx_event_ctx; +}; + +struct hwc_wq { + struct hw_channel_context *hwc; + + struct gdma_queue *gdma_wq; + struct hwc_dma_buf *msg_buf; + u16 queue_depth; + + struct hwc_cq *hwc_cq; +}; + +struct hwc_caller_ctx { + struct completion comp_event; + void *output_buf; + u32 output_buflen; + + u32 error; /* Linux error code */ + u32 status_code; +}; + +struct hw_channel_context { + struct gdma_dev *gdma_dev; + struct device *dev; + + u16 num_inflight_msg; + u32 max_req_msg_size; + + u16 hwc_init_q_depth_max; + u32 hwc_init_max_req_msg_size; + u32 hwc_init_max_resp_msg_size; + + struct completion hwc_init_eqe_comp; + + struct hwc_wq *rxq; + struct hwc_wq *txq; + struct hwc_cq *cq; + + struct semaphore sema; + struct gdma_resource inflight_msg_res; + + struct hwc_caller_ctx *caller_ctx; +}; + +int mana_hwc_create_channel(struct gdma_context *gc); +void mana_hwc_destroy_channel(struct gdma_context *gc); + +int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, + const void *req, u32 resp_len, void *resp); + +#endif /* _HW_CHANNEL_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h new file mode 100644 index 000000000000..a2c3f826f022 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -0,0 +1,533 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _MANA_H +#define _MANA_H + +#include "gdma.h" +#include "hw_channel.h" + +/* Microsoft Azure Network Adapter (MANA)'s definitions + * + * Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +/* MANA protocol version */ +#define MANA_MAJOR_VERSION 0 +#define MANA_MINOR_VERSION 1 +#define MANA_MICRO_VERSION 1 + +typedef u64 mana_handle_t; +#define INVALID_MANA_HANDLE ((mana_handle_t)-1) + +enum TRI_STATE { + TRI_STATE_UNKNOWN = -1, + TRI_STATE_FALSE = 0, + TRI_STATE_TRUE = 1 +}; + +/* Number of entries for hardware indirection table must be in power of 2 */ +#define MANA_INDIRECT_TABLE_SIZE 64 +#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) + +/* The Toeplitz hash key's length in bytes: should be multiple of 8 */ +#define MANA_HASH_KEY_SIZE 40 + +#define COMP_ENTRY_SIZE 64 + +#define ADAPTER_MTU_SIZE 1500 +#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) + +#define RX_BUFFERS_PER_QUEUE 512 + +#define MAX_SEND_BUFFERS_PER_QUEUE 256 + +#define EQ_SIZE (8 * PAGE_SIZE) +#define LOG2_EQ_THROTTLE 3 + +#define MAX_PORTS_IN_MANA_DEV 16 + +struct mana_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct mana_txq { + struct gdma_queue *gdma_sq; + + union { + u32 gdma_txq_id; + struct { + u32 reserved1 : 10; + u32 vsq_frame : 14; + u32 reserved2 : 8; + }; + }; + + u16 vp_offset; + + struct net_device *ndev; + + /* The SKBs are sent to the HW and we are waiting for the CQEs. 
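+ * pending_sends below tracks how many of them are still outstanding.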
*/ + struct sk_buff_head pending_skbs; + struct netdev_queue *net_txq; + + atomic_t pending_sends; + + struct mana_stats stats; +}; + +/* skb data and frags dma mappings */ +struct mana_skb_head { + dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; + + u32 size[MAX_SKB_FRAGS + 1]; +}; + +#define MANA_HEADROOM sizeof(struct mana_skb_head) + +enum mana_tx_pkt_format { + MANA_SHORT_PKT_FMT = 0, + MANA_LONG_PKT_FMT = 1, +}; + +struct mana_tx_short_oob { + u32 pkt_fmt : 2; + u32 is_outer_ipv4 : 1; + u32 is_outer_ipv6 : 1; + u32 comp_iphdr_csum : 1; + u32 comp_tcp_csum : 1; + u32 comp_udp_csum : 1; + u32 supress_txcqe_gen : 1; + u32 vcq_num : 24; + + u32 trans_off : 10; /* Transport header offset */ + u32 vsq_frame : 14; + u32 short_vp_offset : 8; +}; /* HW DATA */ + +struct mana_tx_long_oob { + u32 is_encap : 1; + u32 inner_is_ipv6 : 1; + u32 inner_tcp_opt : 1; + u32 inject_vlan_pri_tag : 1; + u32 reserved1 : 12; + u32 pcp : 3; /* 802.1Q */ + u32 dei : 1; /* 802.1Q */ + u32 vlan_id : 12; /* 802.1Q */ + + u32 inner_frame_offset : 10; + u32 inner_ip_rel_offset : 6; + u32 long_vp_offset : 12; + u32 reserved2 : 4; + + u32 reserved3; + u32 reserved4; +}; /* HW DATA */ + +struct mana_tx_oob { + struct mana_tx_short_oob s_oob; + struct mana_tx_long_oob l_oob; +}; /* HW DATA */ + +enum mana_cq_type { + MANA_CQ_TYPE_RX, + MANA_CQ_TYPE_TX, +}; + +enum mana_cqe_type { + CQE_INVALID = 0, + CQE_RX_OKAY = 1, + CQE_RX_COALESCED_4 = 2, + CQE_RX_OBJECT_FENCE = 3, + CQE_RX_TRUNCATED = 4, + + CQE_TX_OKAY = 32, + CQE_TX_SA_DROP = 33, + CQE_TX_MTU_DROP = 34, + CQE_TX_INVALID_OOB = 35, + CQE_TX_INVALID_ETH_TYPE = 36, + CQE_TX_HDR_PROCESSING_ERROR = 37, + CQE_TX_VF_DISABLED = 38, + CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, + CQE_TX_VPORT_DISABLED = 40, + CQE_TX_VLAN_TAGGING_VIOLATION = 41, +}; + +#define MANA_CQE_COMPLETION 1 + +struct mana_cqe_header { + u32 cqe_type : 6; + u32 client_type : 2; + u32 vendor_err : 24; +}; /* HW DATA */ + +/* NDIS HASH Types */ +#define NDIS_HASH_IPV4 BIT(0) +#define NDIS_HASH_TCP_IPV4 BIT(1) +#define NDIS_HASH_UDP_IPV4 BIT(2) +#define NDIS_HASH_IPV6 BIT(3) +#define NDIS_HASH_TCP_IPV6 BIT(4) +#define NDIS_HASH_UDP_IPV6 BIT(5) +#define NDIS_HASH_IPV6_EX BIT(6) +#define NDIS_HASH_TCP_IPV6_EX BIT(7) +#define NDIS_HASH_UDP_IPV6_EX BIT(8) + +#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) +#define MANA_HASH_L4 \ + (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ + NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) + +struct mana_rxcomp_perpkt_info { + u32 pkt_len : 16; + u32 reserved1 : 16; + u32 reserved2; + u32 pkt_hash; +}; /* HW DATA */ + +#define MANA_RXCOMP_OOB_NUM_PPI 4 + +/* Receive completion OOB */ +struct mana_rxcomp_oob { + struct mana_cqe_header cqe_hdr; + + u32 rx_vlan_id : 12; + u32 rx_vlantag_present : 1; + u32 rx_outer_iphdr_csum_succeed : 1; + u32 rx_outer_iphdr_csum_fail : 1; + u32 reserved1 : 1; + u32 rx_hashtype : 9; + u32 rx_iphdr_csum_succeed : 1; + u32 rx_iphdr_csum_fail : 1; + u32 rx_tcp_csum_succeed : 1; + u32 rx_tcp_csum_fail : 1; + u32 rx_udp_csum_succeed : 1; + u32 rx_udp_csum_fail : 1; + u32 reserved2 : 1; + + struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; + + u32 rx_wqe_offset; +}; /* HW DATA */ + +struct mana_tx_comp_oob { + struct mana_cqe_header cqe_hdr; + + u32 tx_data_offset; + + u32 tx_sgl_offset : 5; + u32 tx_wqe_offset : 27; + + u32 reserved[12]; +}; /* HW DATA */ + +struct mana_rxq; + +struct mana_cq { + struct gdma_queue *gdma_cq; + + /* Cache the CQ id (used to verify if each CQE comes to 
the right CQ. */ + u32 gdma_id; + + /* Type of the CQ: TX or RX */ + enum mana_cq_type type; + + /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. + * Only and must be non-NULL if type is MANA_CQ_TYPE_RX. + */ + struct mana_rxq *rxq; + + /* Pointer to the mana_txq that is pushing TX CQEs to the queue. + * Only and must be non-NULL if type is MANA_CQ_TYPE_TX. + */ + struct mana_txq *txq; + + /* Pointer to a buffer which the CQ handler can copy the CQE's into. */ + struct gdma_comp *gdma_comp_buf; +}; + +#define GDMA_MAX_RQE_SGES 15 + +struct mana_recv_buf_oob { + /* A valid GDMA work request representing the data buffer. */ + struct gdma_wqe_request wqe_req; + + void *buf_va; + dma_addr_t buf_dma_addr; + + /* SGL of the buffer going to be sent has part of the work request. */ + u32 num_sge; + struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; + + /* Required to store the result of mana_gd_post_work_request. + * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the + * work queue when the WQE is consumed. + */ + struct gdma_posted_wqe_info wqe_inf; +}; + +struct mana_rxq { + struct gdma_queue *gdma_rq; + /* Cache the gdma receive queue id */ + u32 gdma_id; + + /* Index of RQ in the vPort, not gdma receive queue id */ + u32 rxq_idx; + + u32 datasize; + + mana_handle_t rxobj; + + struct mana_cq rx_cq; + + struct net_device *ndev; + + /* Total number of receive buffers to be allocated */ + u32 num_rx_buf; + + u32 buf_index; + + struct mana_stats stats; + + /* MUST BE THE LAST MEMBER: + * Each receive buffer has an associated mana_recv_buf_oob. + */ + struct mana_recv_buf_oob rx_oobs[]; +}; + +struct mana_tx_qp { + struct mana_txq txq; + + struct mana_cq tx_cq; + + mana_handle_t tx_object; +}; + +struct mana_ethtool_stats { + u64 stop_queue; + u64 wake_queue; +}; + +struct mana_context { + struct gdma_dev *gdma_dev; + + u16 num_ports; + + struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; +}; + +struct mana_port_context { + struct mana_context *ac; + struct net_device *ndev; + + u8 mac_addr[ETH_ALEN]; + + struct mana_eq *eqs; + + enum TRI_STATE rss_state; + + mana_handle_t default_rxobj; + bool tx_shortform_allowed; + u16 tx_vp_offset; + + struct mana_tx_qp *tx_qp; + + /* Indirection Table for RX & TX. The values are queue indexes */ + u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; + + /* Indirection table containing RxObject Handles */ + mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; + + /* Hash key used by the NIC */ + u8 hashkey[MANA_HASH_KEY_SIZE]; + + /* This points to an array of num_queues of RQ pointers. */ + struct mana_rxq **rxqs; + + /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
*/ + unsigned int max_queues; + unsigned int num_queues; + + mana_handle_t port_handle; + + u16 port_idx; + + bool port_is_up; + bool port_st_save; /* Saved port state */ + + struct mana_ethtool_stats eth_stats; +}; + +int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, + bool update_hash, bool update_tab); + +int mana_alloc_queues(struct net_device *ndev); +int mana_attach(struct net_device *ndev); +int mana_detach(struct net_device *ndev, bool from_close); + +int mana_probe(struct gdma_dev *gd); +void mana_remove(struct gdma_dev *gd); + +extern const struct ethtool_ops mana_ethtool_ops; + +struct mana_obj_spec { + u32 queue_index; + u64 gdma_region; + u32 queue_size; + u32 attached_eq; + u32 modr_ctx_id; +}; + +enum mana_command_code { + MANA_QUERY_DEV_CONFIG = 0x20001, + MANA_QUERY_GF_STAT = 0x20002, + MANA_CONFIG_VPORT_TX = 0x20003, + MANA_CREATE_WQ_OBJ = 0x20004, + MANA_DESTROY_WQ_OBJ = 0x20005, + MANA_FENCE_RQ = 0x20006, + MANA_CONFIG_VPORT_RX = 0x20007, + MANA_QUERY_VPORT_CONFIG = 0x20008, +}; + +/* Query Device Configuration */ +struct mana_query_device_cfg_req { + struct gdma_req_hdr hdr; + + /* Driver Capability flags */ + u64 drv_cap_flags1; + u64 drv_cap_flags2; + u64 drv_cap_flags3; + u64 drv_cap_flags4; + + u32 proto_major_ver; + u32 proto_minor_ver; + u32 proto_micro_ver; + + u32 reserved; +}; /* HW DATA */ + +struct mana_query_device_cfg_resp { + struct gdma_resp_hdr hdr; + + u64 pf_cap_flags1; + u64 pf_cap_flags2; + u64 pf_cap_flags3; + u64 pf_cap_flags4; + + u16 max_num_vports; + u16 reserved; + u32 max_num_eqs; +}; /* HW DATA */ + +/* Query vPort Configuration */ +struct mana_query_vport_cfg_req { + struct gdma_req_hdr hdr; + u32 vport_index; +}; /* HW DATA */ + +struct mana_query_vport_cfg_resp { + struct gdma_resp_hdr hdr; + u32 max_num_sq; + u32 max_num_rq; + u32 num_indirection_ent; + u32 reserved1; + u8 mac_addr[6]; + u8 reserved2[2]; + mana_handle_t vport; +}; /* HW DATA */ + +/* Configure vPort */ +struct mana_config_vport_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u32 pdid; + u32 doorbell_pageid; +}; /* HW DATA */ + +struct mana_config_vport_resp { + struct gdma_resp_hdr hdr; + u16 tx_vport_offset; + u8 short_form_allowed; + u8 reserved; +}; /* HW DATA */ + +/* Create WQ Object */ +struct mana_create_wqobj_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u32 wq_type; + u32 reserved; + u64 wq_gdma_region; + u64 cq_gdma_region; + u32 wq_size; + u32 cq_size; + u32 cq_moderation_ctx_id; + u32 cq_parent_qid; +}; /* HW DATA */ + +struct mana_create_wqobj_resp { + struct gdma_resp_hdr hdr; + u32 wq_id; + u32 cq_id; + mana_handle_t wq_obj; +}; /* HW DATA */ + +/* Destroy WQ Object */ +struct mana_destroy_wqobj_req { + struct gdma_req_hdr hdr; + u32 wq_type; + u32 reserved; + mana_handle_t wq_obj_handle; +}; /* HW DATA */ + +struct mana_destroy_wqobj_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Fence RQ */ +struct mana_fence_rq_req { + struct gdma_req_hdr hdr; + mana_handle_t wq_obj_handle; +}; /* HW DATA */ + +struct mana_fence_rq_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Configure vPort Rx Steering */ +struct mana_cfg_rx_steer_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u16 num_indir_entries; + u16 indir_tab_offset; + u32 rx_enable; + u32 rss_enable; + u8 update_default_rxobj; + u8 update_hashkey; + u8 update_indir_tab; + u8 reserved; + mana_handle_t default_rxobj; + u8 hashkey[MANA_HASH_KEY_SIZE]; +}; /* HW DATA */ + +struct mana_cfg_rx_steer_resp { + struct gdma_resp_hdr hdr; +}; /* 
HW DATA */ + +#define MANA_MAX_NUM_QUEUES 16 + +#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) + +struct mana_tx_package { + struct gdma_wqe_request wqe_req; + struct gdma_sge sgl_array[5]; + struct gdma_sge *sgl_ptr; + + struct mana_tx_oob tx_oob; + + struct gdma_posted_wqe_info wqe_info; +}; + +#endif /* _MANA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c new file mode 100644 index 000000000000..04d067243457 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -0,0 +1,1895 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mm.h> + +#include <net/checksum.h> +#include <net/ip6_checksum.h> + +#include "mana.h" + +/* Microsoft Azure Network Adapter (MANA) functions */ + +static int mana_open(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + err = mana_alloc_queues(ndev); + if (err) + return err; + + apc->port_is_up = true; + + /* Ensure port state updated before txq state */ + smp_wmb(); + + netif_carrier_on(ndev); + netif_tx_wake_all_queues(ndev); + + return 0; +} + +static int mana_close(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + + if (!apc->port_is_up) + return 0; + + return mana_detach(ndev, true); +} + +static bool mana_can_tx(struct gdma_queue *wq) +{ + return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE; +} + +static unsigned int mana_checksum_info(struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + return IPPROTO_TCP; + + if (ip->protocol == IPPROTO_UDP) + return IPPROTO_UDP; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6 = ipv6_hdr(skb); + + if (ip6->nexthdr == IPPROTO_TCP) + return IPPROTO_TCP; + + if (ip6->nexthdr == IPPROTO_UDP) + return IPPROTO_UDP; + } + + /* No csum offloading */ + return 0; +} + +static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, + struct mana_tx_package *tp) +{ + struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + struct gdma_dev *gd = apc->ac->gdma_dev; + struct gdma_context *gc; + struct device *dev; + skb_frag_t *frag; + dma_addr_t da; + int i; + + gc = gd->gdma_context; + dev = gc->dev; + da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + + if (dma_mapping_error(dev, da)) + return -ENOMEM; + + ash->dma_handle[0] = da; + ash->size[0] = skb_headlen(skb); + + tp->wqe_req.sgl[0].address = ash->dma_handle[0]; + tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; + tp->wqe_req.sgl[0].size = ash->size[0]; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, da)) + goto frag_err; + + ash->dma_handle[i + 1] = da; + ash->size[i + 1] = skb_frag_size(frag); + + tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; + tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; + tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; + } + + return 0; + +frag_err: + for (i = i - 1; i >= 0; i--) + dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], + DMA_TO_DEVICE); + + dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + + return -ENOMEM; +} + +static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ 
+ enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; + struct mana_port_context *apc = netdev_priv(ndev); + u16 txq_idx = skb_get_queue_mapping(skb); + struct gdma_dev *gd = apc->ac->gdma_dev; + bool ipv4 = false, ipv6 = false; + struct mana_tx_package pkg = {}; + struct netdev_queue *net_txq; + struct mana_stats *tx_stats; + struct gdma_queue *gdma_sq; + unsigned int csum_type; + struct mana_txq *txq; + struct mana_cq *cq; + int err, len; + + if (unlikely(!apc->port_is_up)) + goto tx_drop; + + if (skb_cow_head(skb, MANA_HEADROOM)) + goto tx_drop_count; + + txq = &apc->tx_qp[txq_idx].txq; + gdma_sq = txq->gdma_sq; + cq = &apc->tx_qp[txq_idx].tx_cq; + + pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; + pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; + + if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { + pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; + pkt_fmt = MANA_LONG_PKT_FMT; + } else { + pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; + } + + pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt; + + if (pkt_fmt == MANA_SHORT_PKT_FMT) + pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob); + else + pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob); + + pkg.wqe_req.inline_oob_data = &pkg.tx_oob; + pkg.wqe_req.flags = 0; + pkg.wqe_req.client_data_unit = 0; + + pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; + WARN_ON_ONCE(pkg.wqe_req.num_sge > 30); + + if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { + pkg.wqe_req.sgl = pkg.sgl_array; + } else { + pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, + sizeof(struct gdma_sge), + GFP_ATOMIC); + if (!pkg.sgl_ptr) + goto tx_drop_count; + + pkg.wqe_req.sgl = pkg.sgl_ptr; + } + + if (skb->protocol == htons(ETH_P_IP)) + ipv4 = true; + else if (skb->protocol == htons(ETH_P_IPV6)) + ipv6 = true; + + if (skb_is_gso(skb)) { + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; + pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; + + pkg.tx_oob.s_oob.comp_iphdr_csum = 1; + pkg.tx_oob.s_oob.comp_tcp_csum = 1; + pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb); + + pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; + pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0; + if (ipv4) { + ip_hdr(skb)->tot_len = 0; + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + } else { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + csum_type = mana_checksum_info(skb); + + if (csum_type == IPPROTO_TCP) { + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; + pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; + + pkg.tx_oob.s_oob.comp_tcp_csum = 1; + pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb); + + } else if (csum_type == IPPROTO_UDP) { + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; + pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; + + pkg.tx_oob.s_oob.comp_udp_csum = 1; + } else { + /* Can't do offload of this type of checksum */ + if (skb_checksum_help(skb)) + goto free_sgl_ptr; + } + } + + if (mana_map_skb(skb, apc, &pkg)) + goto free_sgl_ptr; + + skb_queue_tail(&txq->pending_skbs, skb); + + len = skb->len; + net_txq = netdev_get_tx_queue(ndev, txq_idx); + + err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req, + (struct gdma_posted_wqe_info *)skb->cb); + if (!mana_can_tx(gdma_sq)) { + netif_tx_stop_queue(net_txq); + apc->eth_stats.stop_queue++; + } + + if (err) { + (void)skb_dequeue_tail(&txq->pending_skbs); + netdev_warn(ndev, "Failed to post TX 
OOB: %d\n", err); + err = NETDEV_TX_BUSY; + goto tx_busy; + } + + err = NETDEV_TX_OK; + atomic_inc(&txq->pending_sends); + + mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); + + /* skb may be freed after mana_gd_post_work_request. Do not use it. */ + skb = NULL; + + tx_stats = &txq->stats; + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->packets++; + tx_stats->bytes += len; + u64_stats_update_end(&tx_stats->syncp); + +tx_busy: + if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) { + netif_tx_wake_queue(net_txq); + apc->eth_stats.wake_queue++; + } + + kfree(pkg.sgl_ptr); + return err; + +free_sgl_ptr: + kfree(pkg.sgl_ptr); +tx_drop_count: + ndev->stats.tx_dropped++; +tx_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void mana_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *st) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + struct mana_stats *stats; + unsigned int start; + u64 packets, bytes; + int q; + + if (!apc->port_is_up) + return; + + netdev_stats_to_stats64(st, &ndev->stats); + + for (q = 0; q < num_queues; q++) { + stats = &apc->rxqs[q]->stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + st->rx_packets += packets; + st->rx_bytes += bytes; + } + + for (q = 0; q < num_queues; q++) { + stats = &apc->tx_qp[q].txq.stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + st->tx_packets += packets; + st->tx_bytes += bytes; + } +} + +static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, + int old_q) +{ + struct mana_port_context *apc = netdev_priv(ndev); + u32 hash = skb_get_hash(skb); + struct sock *sk = skb->sk; + int txq; + + txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; + + if (txq != old_q && sk && sk_fullsock(sk) && + rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, txq); + + return txq; +} + +static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int txq; + + if (ndev->real_num_tx_queues == 1) + return 0; + + txq = sk_tx_queue_get(skb->sk); + + if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { + if (skb_rx_queue_recorded(skb)) + txq = skb_get_rx_queue(skb); + else + txq = mana_get_tx_queue(ndev, skb, txq); + } + + return txq; +} + +static const struct net_device_ops mana_devops = { + .ndo_open = mana_open, + .ndo_stop = mana_close, + .ndo_select_queue = mana_select_queue, + .ndo_start_xmit = mana_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = mana_get_stats64, +}; + +static void mana_cleanup_port_context(struct mana_port_context *apc) +{ + kfree(apc->rxqs); + apc->rxqs = NULL; +} + +static int mana_init_port_context(struct mana_port_context *apc) +{ + apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), + GFP_KERNEL); + + return !apc->rxqs ? 
-ENOMEM : 0; +} + +static int mana_send_request(struct mana_context *ac, void *in_buf, + u32 in_len, void *out_buf, u32 out_len) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + struct gdma_resp_hdr *resp = out_buf; + struct gdma_req_hdr *req = in_buf; + struct device *dev = gc->dev; + static atomic_t activity_id; + int err; + + req->dev_id = gc->mana.dev_id; + req->activity_id = atomic_inc_return(&activity_id); + + err = mana_gd_send_request(gc, in_len, in_buf, out_len, + out_buf); + if (err || resp->status) { + dev_err(dev, "Failed to send mana message: %d, 0x%x\n", + err, resp->status); + return err ? err : -EPROTO; + } + + if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || + req->activity_id != resp->activity_id) { + dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n", + req->dev_id.as_uint32, resp->dev_id.as_uint32, + req->activity_id, resp->activity_id); + return -EPROTO; + } + + return 0; +} + +static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr, + const enum mana_command_code expected_code, + const u32 min_size) +{ + if (resp_hdr->response.msg_type != expected_code) + return -EPROTO; + + if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) + return -EPROTO; + + if (resp_hdr->response.msg_size < min_size) + return -EPROTO; + + return 0; +} + +static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, + u32 proto_minor_ver, u32 proto_micro_ver, + u16 *max_num_vports) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + struct mana_query_device_cfg_resp resp = {}; + struct mana_query_device_cfg_req req = {}; + struct device *dev = gc->dev; + int err = 0; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, + sizeof(req), sizeof(resp)); + req.proto_major_ver = proto_major_ver; + req.proto_minor_ver = proto_minor_ver; + req.proto_micro_ver = proto_micro_ver; + + err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); + if (err) { + dev_err(dev, "Failed to query config: %d", err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG, + sizeof(resp)); + if (err || resp.hdr.status) { + dev_err(dev, "Invalid query result: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + return err; + } + + *max_num_vports = resp.max_num_vports; + + return 0; +} + +static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, + u32 *max_sq, u32 *max_rq, u32 *num_indir_entry) +{ + struct mana_query_vport_cfg_resp resp = {}; + struct mana_query_vport_cfg_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG, + sizeof(req), sizeof(resp)); + + req.vport_index = vport_index; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) + return err; + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG, + sizeof(resp)); + if (err) + return err; + + if (resp.hdr.status) + return -EPROTO; + + *max_sq = resp.max_num_sq; + *max_rq = resp.max_num_rq; + *num_indir_entry = resp.num_indirection_ent; + + apc->port_handle = resp.vport; + ether_addr_copy(apc->mac_addr, resp.mac_addr); + + return 0; +} + +static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, + u32 doorbell_pg_id) +{ + struct mana_config_vport_resp resp = {}; + struct mana_config_vport_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX, + sizeof(req), sizeof(resp)); + req.vport = apc->port_handle; + req.pdid = protection_dom_id; + req.doorbell_pageid = doorbell_pg_id; + + err = 
mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX, + sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", + err, resp.hdr.status); + if (!err) + err = -EPROTO; + + goto out; + } + + apc->tx_shortform_allowed = resp.short_form_allowed; + apc->tx_vp_offset = resp.tx_vport_offset; +out: + return err; +} + +static int mana_cfg_vport_steering(struct mana_port_context *apc, + enum TRI_STATE rx, + bool update_default_rxobj, bool update_key, + bool update_tab) +{ + u16 num_entries = MANA_INDIRECT_TABLE_SIZE; + struct mana_cfg_rx_steer_req *req = NULL; + struct mana_cfg_rx_steer_resp resp = {}; + struct net_device *ndev = apc->ndev; + mana_handle_t *req_indir_tab; + u32 req_buf_size; + int err; + + req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries; + req = kzalloc(req_buf_size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, + sizeof(resp)); + + req->vport = apc->port_handle; + req->num_indir_entries = num_entries; + req->indir_tab_offset = sizeof(*req); + req->rx_enable = rx; + req->rss_enable = apc->rss_state; + req->update_default_rxobj = update_default_rxobj; + req->update_hashkey = update_key; + req->update_indir_tab = update_tab; + req->default_rxobj = apc->default_rxobj; + + if (update_key) + memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); + + if (update_tab) { + req_indir_tab = (mana_handle_t *)(req + 1); + memcpy(req_indir_tab, apc->rxobj_table, + req->num_indir_entries * sizeof(mana_handle_t)); + } + + err = mana_send_request(apc->ac, req, req_buf_size, &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX, + sizeof(resp)); + if (err) { + netdev_err(ndev, "vPort RX configuration failed: %d\n", err); + goto out; + } + + if (resp.hdr.status) { + netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", + resp.hdr.status); + err = -EPROTO; + } +out: + kfree(req); + return err; +} + +static int mana_create_wq_obj(struct mana_port_context *apc, + mana_handle_t vport, + u32 wq_type, struct mana_obj_spec *wq_spec, + struct mana_obj_spec *cq_spec, + mana_handle_t *wq_obj) +{ + struct mana_create_wqobj_resp resp = {}; + struct mana_create_wqobj_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ, + sizeof(req), sizeof(resp)); + req.vport = vport; + req.wq_type = wq_type; + req.wq_gdma_region = wq_spec->gdma_region; + req.cq_gdma_region = cq_spec->gdma_region; + req.wq_size = wq_spec->queue_size; + req.cq_size = cq_spec->queue_size; + req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; + req.cq_parent_qid = cq_spec->attached_eq; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to create WQ object: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ, + sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + goto out; + } + + if (resp.wq_obj == INVALID_MANA_HANDLE) { + netdev_err(ndev, "Got an invalid WQ object handle\n"); + err = -EPROTO; + goto out; + } + + *wq_obj = resp.wq_obj; 
+ wq_spec->queue_index = resp.wq_id; + cq_spec->queue_index = resp.cq_id; + + return 0; +out: + return err; +} + +static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, + mana_handle_t wq_obj) +{ + struct mana_destroy_wqobj_resp resp = {}; + struct mana_destroy_wqobj_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ, + sizeof(req), sizeof(resp)); + req.wq_type = wq_type; + req.wq_obj_handle = wq_obj; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to destroy WQ object: %d\n", err); + return; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ, + sizeof(resp)); + if (err || resp.hdr.status) + netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err, + resp.hdr.status); +} + +static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf) +{ + int i; + + for (i = 0; i < CQE_POLLING_BUFFER; i++) + memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp)); +} + +static void mana_destroy_eq(struct gdma_context *gc, + struct mana_port_context *apc) +{ + struct gdma_queue *eq; + int i; + + if (!apc->eqs) + return; + + for (i = 0; i < apc->num_queues; i++) { + eq = apc->eqs[i].eq; + if (!eq) + continue; + + mana_gd_destroy_queue(gc, eq); + } + + kfree(apc->eqs); + apc->eqs = NULL; +} + +static int mana_create_eq(struct mana_port_context *apc) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + struct gdma_queue_spec spec = {}; + int err; + int i; + + apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq), + GFP_KERNEL); + if (!apc->eqs) + return -ENOMEM; + + spec.type = GDMA_EQ; + spec.monitor_avl_buf = false; + spec.queue_size = EQ_SIZE; + spec.eq.callback = NULL; + spec.eq.context = apc->eqs; + spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; + spec.eq.ndev = apc->ndev; + + for (i = 0; i < apc->num_queues; i++) { + mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll); + + err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq); + if (err) + goto out; + } + + return 0; +out: + mana_destroy_eq(gd->gdma_context, apc); + return err; +} + +static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units) +{ + u32 used_space_old; + u32 used_space_new; + + used_space_old = wq->head - wq->tail; + used_space_new = wq->head - (wq->tail + num_units); + + if (WARN_ON_ONCE(used_space_new > used_space_old)) + return -ERANGE; + + wq->tail += num_units; + return 0; +} + +static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) +{ + struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; + struct device *dev = gc->dev; + int i; + + dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); +} + +static void mana_poll_tx_cq(struct mana_cq *cq) +{ + struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent; + struct gdma_comp *completions = cq->gdma_comp_buf; + struct gdma_posted_wqe_info *wqe_info; + unsigned int pkt_transmitted = 0; + unsigned int wqe_unit_cnt = 0; + struct mana_txq *txq = cq->txq; + struct mana_port_context *apc; + struct netdev_queue *net_txq; + struct gdma_queue *gdma_wq; + unsigned int avail_space; + struct net_device *ndev; + struct sk_buff *skb; + bool txq_stopped; + int comp_read; + int i; + + ndev = txq->ndev; + apc = netdev_priv(ndev); + + comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, 
+ CQE_POLLING_BUFFER); + + for (i = 0; i < comp_read; i++) { + struct mana_tx_comp_oob *cqe_oob; + + if (WARN_ON_ONCE(!completions[i].is_sq)) + return; + + cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data; + if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != + MANA_CQE_COMPLETION)) + return; + + switch (cqe_oob->cqe_hdr.cqe_type) { + case CQE_TX_OKAY: + break; + + case CQE_TX_SA_DROP: + case CQE_TX_MTU_DROP: + case CQE_TX_INVALID_OOB: + case CQE_TX_INVALID_ETH_TYPE: + case CQE_TX_HDR_PROCESSING_ERROR: + case CQE_TX_VF_DISABLED: + case CQE_TX_VPORT_IDX_OUT_OF_RANGE: + case CQE_TX_VPORT_DISABLED: + case CQE_TX_VLAN_TAGGING_VIOLATION: + WARN_ONCE(1, "TX: CQE error %d: ignored.\n", + cqe_oob->cqe_hdr.cqe_type); + break; + + default: + /* If the CQE type is unexpected, log an error, assert, + * and go through the error path. + */ + WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", + cqe_oob->cqe_hdr.cqe_type); + return; + } + + if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) + return; + + skb = skb_dequeue(&txq->pending_skbs); + if (WARN_ON_ONCE(!skb)) + return; + + wqe_info = (struct gdma_posted_wqe_info *)skb->cb; + wqe_unit_cnt += wqe_info->wqe_size_in_bu; + + mana_unmap_skb(skb, apc); + + napi_consume_skb(skb, gdma_eq->eq.budget); + + pkt_transmitted++; + } + + if (WARN_ON_ONCE(wqe_unit_cnt == 0)) + return; + + mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); + + gdma_wq = txq->gdma_sq; + avail_space = mana_gd_wq_avail_space(gdma_wq); + + /* Ensure tail updated before checking q stop */ + smp_mb(); + + net_txq = txq->net_txq; + txq_stopped = netif_tx_queue_stopped(net_txq); + + /* Ensure checking txq_stopped before apc->port_is_up. */ + smp_rmb(); + + if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { + netif_tx_wake_queue(net_txq); + apc->eth_stats.wake_queue++; + } + + if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) + WARN_ON_ONCE(1); +} + +static void mana_post_pkt_rxq(struct mana_rxq *rxq) +{ + struct mana_recv_buf_oob *recv_buf_oob; + u32 curr_index; + int err; + + curr_index = rxq->buf_index++; + if (rxq->buf_index == rxq->num_rx_buf) + rxq->buf_index = 0; + + recv_buf_oob = &rxq->rx_oobs[curr_index]; + + err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req, + &recv_buf_oob->wqe_inf); + if (WARN_ON_ONCE(err)) + return; + + WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); +} + +static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, + struct mana_rxq *rxq) +{ + struct mana_stats *rx_stats = &rxq->stats; + struct net_device *ndev = rxq->ndev; + uint pkt_len = cqe->ppi[0].pkt_len; + struct mana_port_context *apc; + u16 rxq_idx = rxq->rxq_idx; + struct napi_struct *napi; + struct gdma_queue *eq; + struct sk_buff *skb; + u32 hash_value; + + apc = netdev_priv(ndev); + eq = apc->eqs[rxq_idx].eq; + eq->eq.work_done++; + napi = &eq->eq.napi; + + if (!buf_va) { + ++ndev->stats.rx_dropped; + return; + } + + skb = build_skb(buf_va, PAGE_SIZE); + + if (!skb) { + free_page((unsigned long)buf_va); + ++ndev->stats.rx_dropped; + return; + } + + skb_put(skb, pkt_len); + skb->dev = napi->dev; + + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + skb_record_rx_queue(skb, rxq_idx); + + if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { + if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { + hash_value = cqe->ppi[0].pkt_hash; + + if 
(cqe->rx_hashtype & MANA_HASH_L4) + skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4); + else + skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3); + } + + napi_gro_receive(napi, skb); + + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->packets++; + rx_stats->bytes += pkt_len; + u64_stats_update_end(&rx_stats->syncp); +} + +static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, + struct gdma_comp *cqe) +{ + struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; + struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; + struct net_device *ndev = rxq->ndev; + struct mana_recv_buf_oob *rxbuf_oob; + struct device *dev = gc->dev; + void *new_buf, *old_buf; + struct page *new_page; + u32 curr, pktlen; + dma_addr_t da; + + switch (oob->cqe_hdr.cqe_type) { + case CQE_RX_OKAY: + break; + + case CQE_RX_TRUNCATED: + netdev_err(ndev, "Dropped a truncated packet\n"); + return; + + case CQE_RX_COALESCED_4: + netdev_err(ndev, "RX coalescing is unsupported\n"); + return; + + case CQE_RX_OBJECT_FENCE: + netdev_err(ndev, "RX Fencing is unsupported\n"); + return; + + default: + netdev_err(ndev, "Unknown RX CQE type = %d\n", + oob->cqe_hdr.cqe_type); + return; + } + + if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY) + return; + + pktlen = oob->ppi[0].pkt_len; + + if (pktlen == 0) { + /* data packets should never have packetlength of zero */ + netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", + rxq->gdma_id, cq->gdma_id, rxq->rxobj); + return; + } + + curr = rxq->buf_index; + rxbuf_oob = &rxq->rx_oobs[curr]; + WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); + + new_page = alloc_page(GFP_ATOMIC); + + if (new_page) { + da = dma_map_page(dev, new_page, 0, rxq->datasize, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, da)) { + __free_page(new_page); + new_page = NULL; + } + } + + new_buf = new_page ? 
page_to_virt(new_page) : NULL; + + if (new_buf) { + dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize, + DMA_FROM_DEVICE); + + old_buf = rxbuf_oob->buf_va; + + /* refresh the rxbuf_oob with the new page */ + rxbuf_oob->buf_va = new_buf; + rxbuf_oob->buf_dma_addr = da; + rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr; + } else { + old_buf = NULL; /* drop the packet if no memory */ + } + + mana_rx_skb(old_buf, oob, rxq); + + mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); + + mana_post_pkt_rxq(rxq); +} + +static void mana_poll_rx_cq(struct mana_cq *cq) +{ + struct gdma_comp *comp = cq->gdma_comp_buf; + int comp_read, i; + + comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); + WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); + + for (i = 0; i < comp_read; i++) { + if (WARN_ON_ONCE(comp[i].is_sq)) + return; + + /* verify recv cqe references the right rxq */ + if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) + return; + + mana_process_rx_cqe(cq->rxq, cq, &comp[i]); + } +} + +static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) +{ + struct mana_cq *cq = context; + + WARN_ON_ONCE(cq->gdma_cq != gdma_queue); + + if (cq->type == MANA_CQ_TYPE_RX) + mana_poll_rx_cq(cq); + else + mana_poll_tx_cq(cq); + + mana_gd_arm_cq(gdma_queue); +} + +static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + + if (!cq->gdma_cq) + return; + + mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); +} + +static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + + if (!txq->gdma_sq) + return; + + mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); +} + +static void mana_destroy_txq(struct mana_port_context *apc) +{ + int i; + + if (!apc->tx_qp) + return; + + for (i = 0; i < apc->num_queues; i++) { + mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); + + mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); + + mana_deinit_txq(apc, &apc->tx_qp[i].txq); + } + + kfree(apc->tx_qp); + apc->tx_qp = NULL; +} + +static int mana_create_txq(struct mana_port_context *apc, + struct net_device *net) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + struct mana_obj_spec wq_spec; + struct mana_obj_spec cq_spec; + struct gdma_queue_spec spec; + struct gdma_context *gc; + struct mana_txq *txq; + struct mana_cq *cq; + u32 txq_size; + u32 cq_size; + int err; + int i; + + apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), + GFP_KERNEL); + if (!apc->tx_qp) + return -ENOMEM; + + /* The minimum size of the WQE is 32 bytes, hence + * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs + * the SQ can store. This value is then used to size other queues + * to prevent overflow. 
+ */ + txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; + BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)); + + cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; + cq_size = PAGE_ALIGN(cq_size); + + gc = gd->gdma_context; + + for (i = 0; i < apc->num_queues; i++) { + apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; + + /* Create SQ */ + txq = &apc->tx_qp[i].txq; + + u64_stats_init(&txq->stats.syncp); + txq->ndev = net; + txq->net_txq = netdev_get_tx_queue(net, i); + txq->vp_offset = apc->tx_vp_offset; + skb_queue_head_init(&txq->pending_skbs); + + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_SQ; + spec.monitor_avl_buf = true; + spec.queue_size = txq_size; + err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); + if (err) + goto out; + + /* Create SQ's CQ */ + cq = &apc->tx_qp[i].tx_cq; + cq->gdma_comp_buf = apc->eqs[i].cqe_poll; + cq->type = MANA_CQ_TYPE_TX; + + cq->txq = txq; + + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_CQ; + spec.monitor_avl_buf = false; + spec.queue_size = cq_size; + spec.cq.callback = mana_cq_handler; + spec.cq.parent_eq = apc->eqs[i].eq; + spec.cq.context = cq; + err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); + if (err) + goto out; + + memset(&wq_spec, 0, sizeof(wq_spec)); + memset(&cq_spec, 0, sizeof(cq_spec)); + + wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region; + wq_spec.queue_size = txq->gdma_sq->queue_size; + + cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; + cq_spec.queue_size = cq->gdma_cq->queue_size; + cq_spec.modr_ctx_id = 0; + cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; + + err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, + &wq_spec, &cq_spec, + &apc->tx_qp[i].tx_object); + + if (err) + goto out; + + txq->gdma_sq->id = wq_spec.queue_index; + cq->gdma_cq->id = cq_spec.queue_index; + + txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + + txq->gdma_txq_id = txq->gdma_sq->id; + + cq->gdma_id = cq->gdma_cq->id; + + if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) + return -EINVAL; + + gc->cq_table[cq->gdma_id] = cq->gdma_cq; + + mana_gd_arm_cq(cq->gdma_cq); + } + + return 0; +out: + mana_destroy_txq(apc); + return err; +} + +static void mana_napi_sync_for_rx(struct mana_rxq *rxq) +{ + struct net_device *ndev = rxq->ndev; + struct mana_port_context *apc; + u16 rxq_idx = rxq->rxq_idx; + struct napi_struct *napi; + struct gdma_queue *eq; + + apc = netdev_priv(ndev); + eq = apc->eqs[rxq_idx].eq; + napi = &eq->eq.napi; + + napi_synchronize(napi); +} + +static void mana_destroy_rxq(struct mana_port_context *apc, + struct mana_rxq *rxq, bool validate_state) + +{ + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; + struct mana_recv_buf_oob *rx_oob; + struct device *dev = gc->dev; + int i; + + if (!rxq) + return; + + if (validate_state) + mana_napi_sync_for_rx(rxq); + + mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); + + mana_deinit_cq(apc, &rxq->rx_cq); + + for (i = 0; i < rxq->num_rx_buf; i++) { + rx_oob = &rxq->rx_oobs[i]; + + if (!rx_oob->buf_va) + continue; + + dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize, + DMA_FROM_DEVICE); + + free_page((unsigned long)rx_oob->buf_va); + rx_oob->buf_va = NULL; + } + + if (rxq->gdma_rq) + mana_gd_destroy_queue(gc, rxq->gdma_rq); + + kfree(rxq); +} + +#define MANA_WQE_HEADER_SIZE 16 +#define MANA_WQE_SGE_SIZE 16 + +static int mana_alloc_rx_wqe(struct mana_port_context *apc, + struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) +{ + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; + 
struct mana_recv_buf_oob *rx_oob; + struct device *dev = gc->dev; + struct page *page; + dma_addr_t da; + u32 buf_idx; + + WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE); + + *rxq_size = 0; + *cq_size = 0; + + for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { + rx_oob = &rxq->rx_oobs[buf_idx]; + memset(rx_oob, 0, sizeof(*rx_oob)); + + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, da)) { + __free_page(page); + return -ENOMEM; + } + + rx_oob->buf_va = page_to_virt(page); + rx_oob->buf_dma_addr = da; + + rx_oob->num_sge = 1; + rx_oob->sgl[0].address = rx_oob->buf_dma_addr; + rx_oob->sgl[0].size = rxq->datasize; + rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; + + rx_oob->wqe_req.sgl = rx_oob->sgl; + rx_oob->wqe_req.num_sge = rx_oob->num_sge; + rx_oob->wqe_req.inline_oob_size = 0; + rx_oob->wqe_req.inline_oob_data = NULL; + rx_oob->wqe_req.flags = 0; + rx_oob->wqe_req.client_data_unit = 0; + + *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE + + MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); + *cq_size += COMP_ENTRY_SIZE; + } + + return 0; +} + +static int mana_push_wqe(struct mana_rxq *rxq) +{ + struct mana_recv_buf_oob *rx_oob; + u32 buf_idx; + int err; + + for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { + rx_oob = &rxq->rx_oobs[buf_idx]; + + err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, + &rx_oob->wqe_inf); + if (err) + return -ENOSPC; + } + + return 0; +} + +static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, + u32 rxq_idx, struct mana_eq *eq, + struct net_device *ndev) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + struct mana_obj_spec wq_spec; + struct mana_obj_spec cq_spec; + struct gdma_queue_spec spec; + struct mana_cq *cq = NULL; + struct gdma_context *gc; + u32 cq_size, rq_size; + struct mana_rxq *rxq; + int err; + + gc = gd->gdma_context; + + rxq = kzalloc(sizeof(*rxq) + + RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob), + GFP_KERNEL); + if (!rxq) + return NULL; + + rxq->ndev = ndev; + rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; + rxq->rxq_idx = rxq_idx; + rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64); + rxq->rxobj = INVALID_MANA_HANDLE; + + err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); + if (err) + goto out; + + rq_size = PAGE_ALIGN(rq_size); + cq_size = PAGE_ALIGN(cq_size); + + /* Create RQ */ + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_RQ; + spec.monitor_avl_buf = true; + spec.queue_size = rq_size; + err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); + if (err) + goto out; + + /* Create RQ's CQ */ + cq = &rxq->rx_cq; + cq->gdma_comp_buf = eq->cqe_poll; + cq->type = MANA_CQ_TYPE_RX; + cq->rxq = rxq; + + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_CQ; + spec.monitor_avl_buf = false; + spec.queue_size = cq_size; + spec.cq.callback = mana_cq_handler; + spec.cq.parent_eq = eq->eq; + spec.cq.context = cq; + err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); + if (err) + goto out; + + memset(&wq_spec, 0, sizeof(wq_spec)); + memset(&cq_spec, 0, sizeof(cq_spec)); + wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region; + wq_spec.queue_size = rxq->gdma_rq->queue_size; + + cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; + cq_spec.queue_size = cq->gdma_cq->queue_size; + cq_spec.modr_ctx_id = 0; + cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; + + err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, + &wq_spec, &cq_spec, &rxq->rxobj); + if (err) + goto out; + + 
rxq->gdma_rq->id = wq_spec.queue_index; + cq->gdma_cq->id = cq_spec.queue_index; + + rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + + rxq->gdma_id = rxq->gdma_rq->id; + cq->gdma_id = cq->gdma_cq->id; + + err = mana_push_wqe(rxq); + if (err) + goto out; + + if (cq->gdma_id >= gc->max_num_cqs) + goto out; + + gc->cq_table[cq->gdma_id] = cq->gdma_cq; + + mana_gd_arm_cq(cq->gdma_cq); +out: + if (!err) + return rxq; + + netdev_err(ndev, "Failed to create RXQ: err = %d\n", err); + + mana_destroy_rxq(apc, rxq, false); + + if (cq) + mana_deinit_cq(apc, cq); + + return NULL; +} + +static int mana_add_rx_queues(struct mana_port_context *apc, + struct net_device *ndev) +{ + struct mana_rxq *rxq; + int err = 0; + int i; + + for (i = 0; i < apc->num_queues; i++) { + rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev); + if (!rxq) { + err = -ENOMEM; + goto out; + } + + u64_stats_init(&rxq->stats.syncp); + + apc->rxqs[i] = rxq; + } + + apc->default_rxobj = apc->rxqs[0]->rxobj; +out: + return err; +} + +static void mana_destroy_vport(struct mana_port_context *apc) +{ + struct mana_rxq *rxq; + u32 rxq_idx; + + for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { + rxq = apc->rxqs[rxq_idx]; + if (!rxq) + continue; + + mana_destroy_rxq(apc, rxq, true); + apc->rxqs[rxq_idx] = NULL; + } + + mana_destroy_txq(apc); +} + +static int mana_create_vport(struct mana_port_context *apc, + struct net_device *net) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + int err; + + apc->default_rxobj = INVALID_MANA_HANDLE; + + err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); + if (err) + return err; + + return mana_create_txq(apc, net); +} + +static void mana_rss_table_init(struct mana_port_context *apc) +{ + int i; + + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + apc->indir_table[i] = + ethtool_rxfh_indir_default(i, apc->num_queues); +} + +int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, + bool update_hash, bool update_tab) +{ + u32 queue_idx; + int i; + + if (update_tab) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + queue_idx = apc->indir_table[i]; + apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; + } + } + + return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); +} + +static int mana_init_port(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + u32 max_txq, max_rxq, max_queues; + int port_idx = apc->port_idx; + u32 num_indirect_entries; + int err; + + err = mana_init_port_context(apc); + if (err) + return err; + + err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, + &num_indirect_entries); + if (err) { + netdev_err(ndev, "Failed to query info for vPort 0\n"); + goto reset_apc; + } + + max_queues = min_t(u32, max_txq, max_rxq); + if (apc->max_queues > max_queues) + apc->max_queues = max_queues; + + if (apc->num_queues > apc->max_queues) + apc->num_queues = apc->max_queues; + + ether_addr_copy(ndev->dev_addr, apc->mac_addr); + + return 0; + +reset_apc: + kfree(apc->rxqs); + apc->rxqs = NULL; + return err; +} + +int mana_alloc_queues(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + struct gdma_dev *gd = apc->ac->gdma_dev; + int err; + + err = mana_create_eq(apc); + if (err) + return err; + + err = mana_create_vport(apc, ndev); + if (err) + goto destroy_eq; + + err = netif_set_real_num_tx_queues(ndev, apc->num_queues); + if (err) + goto destroy_vport; + + err = mana_add_rx_queues(apc, ndev); + if (err) + goto 
destroy_vport; + + apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; + + err = netif_set_real_num_rx_queues(ndev, apc->num_queues); + if (err) + goto destroy_vport; + + mana_rss_table_init(apc); + + err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); + if (err) + goto destroy_vport; + + return 0; + +destroy_vport: + mana_destroy_vport(apc); +destroy_eq: + mana_destroy_eq(gd->gdma_context, apc); + return err; +} + +int mana_attach(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + ASSERT_RTNL(); + + err = mana_init_port(ndev); + if (err) + return err; + + err = mana_alloc_queues(ndev); + if (err) { + kfree(apc->rxqs); + apc->rxqs = NULL; + return err; + } + + netif_device_attach(ndev); + + apc->port_is_up = apc->port_st_save; + + /* Ensure port state updated before txq state */ + smp_wmb(); + + if (apc->port_is_up) { + netif_carrier_on(ndev); + netif_tx_wake_all_queues(ndev); + } + + return 0; +} + +static int mana_dealloc_queues(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + struct mana_txq *txq; + int i, err; + + if (apc->port_is_up) + return -EINVAL; + + /* No packet can be transmitted now since apc->port_is_up is false. + * There is still a tiny chance that mana_poll_tx_cq() can re-enable + * a txq because it may not timely see apc->port_is_up being cleared + * to false, but it doesn't matter since mana_start_xmit() drops any + * new packets due to apc->port_is_up being false. + * + * Drain all the in-flight TX packets + */ + for (i = 0; i < apc->num_queues; i++) { + txq = &apc->tx_qp[i].txq; + + while (atomic_read(&txq->pending_sends) > 0) + usleep_range(1000, 2000); + } + + /* We're 100% sure the queues can no longer be woken up, because + * we're sure now mana_poll_tx_cq() can't be running. 
+ */ + + apc->rss_state = TRI_STATE_FALSE; + err = mana_config_rss(apc, TRI_STATE_FALSE, false, false); + if (err) { + netdev_err(ndev, "Failed to disable vPort: %d\n", err); + return err; + } + + /* TODO: Implement RX fencing */ + ssleep(1); + + mana_destroy_vport(apc); + + mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc); + + return 0; +} + +int mana_detach(struct net_device *ndev, bool from_close) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + ASSERT_RTNL(); + + apc->port_st_save = apc->port_is_up; + apc->port_is_up = false; + + /* Ensure port state updated before txq state */ + smp_wmb(); + + netif_tx_disable(ndev); + netif_carrier_off(ndev); + + if (apc->port_st_save) { + err = mana_dealloc_queues(ndev); + if (err) + return err; + } + + if (!from_close) { + netif_device_detach(ndev); + mana_cleanup_port_context(apc); + } + + return 0; +} + +static int mana_probe_port(struct mana_context *ac, int port_idx, + struct net_device **ndev_storage) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + struct mana_port_context *apc; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev_mq(sizeof(struct mana_port_context), + gc->max_num_queues); + if (!ndev) + return -ENOMEM; + + *ndev_storage = ndev; + + apc = netdev_priv(ndev); + apc->ac = ac; + apc->ndev = ndev; + apc->max_queues = gc->max_num_queues; + apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES); + apc->port_handle = INVALID_MANA_HANDLE; + apc->port_idx = port_idx; + + ndev->netdev_ops = &mana_devops; + ndev->ethtool_ops = &mana_ethtool_ops; + ndev->mtu = ETH_DATA_LEN; + ndev->max_mtu = ndev->mtu; + ndev->min_mtu = ndev->mtu; + ndev->needed_headroom = MANA_HEADROOM; + SET_NETDEV_DEV(ndev, gc->dev); + + netif_carrier_off(ndev); + + netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); + + err = mana_init_port(ndev); + if (err) + goto free_net; + + netdev_lockdep_set_classes(ndev); + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + ndev->hw_features |= NETIF_F_RXCSUM; + ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->hw_features |= NETIF_F_RXHASH; + ndev->features = ndev->hw_features; + ndev->vlan_features = 0; + + err = register_netdev(ndev); + if (err) { + netdev_err(ndev, "Unable to register netdev.\n"); + goto reset_apc; + } + + return 0; + +reset_apc: + kfree(apc->rxqs); + apc->rxqs = NULL; +free_net: + *ndev_storage = NULL; + netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err); + free_netdev(ndev); + return err; +} + +int mana_probe(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct device *dev = gc->dev; + struct mana_context *ac; + int err; + int i; + + dev_info(dev, + "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n", + MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION); + + err = mana_gd_register_device(gd); + if (err) + return err; + + ac = kzalloc(sizeof(*ac), GFP_KERNEL); + if (!ac) + return -ENOMEM; + + ac->gdma_dev = gd; + ac->num_ports = 1; + gd->driver_data = ac; + + err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, + MANA_MICRO_VERSION, &ac->num_ports); + if (err) + goto out; + + if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) + ac->num_ports = MAX_PORTS_IN_MANA_DEV; + + for (i = 0; i < ac->num_ports; i++) { + err = mana_probe_port(ac, i, &ac->ports[i]); + if (err) + break; + } +out: + if (err) + mana_remove(gd); + + return err; +} + +void mana_remove(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct 
mana_context *ac = gd->driver_data; + struct device *dev = gc->dev; + struct net_device *ndev; + int i; + + for (i = 0; i < ac->num_ports; i++) { + ndev = ac->ports[i]; + if (!ndev) { + if (i == 0) + dev_err(dev, "No net device to remove\n"); + goto out; + } + + /* All cleanup actions should stay after rtnl_lock(), otherwise + * other functions may access partially cleaned up data. + */ + rtnl_lock(); + + mana_detach(ndev, false); + + unregister_netdevice(ndev); + + rtnl_unlock(); + + free_netdev(ndev); + } +out: + mana_gd_deregister_device(gd); + gd->driver_data = NULL; + gd->gdma_context = NULL; + kfree(ac); +} diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c new file mode 100644 index 000000000000..7e74339f39ae --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> + +#include "mana.h" + +static const struct { + char name[ETH_GSTRING_LEN]; + u16 offset; +} mana_eth_stats[] = { + {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, + {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, +}; + +static int mana_get_sset_count(struct net_device *ndev, int stringset) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + + if (stringset != ETH_SS_STATS) + return -EINVAL; + + return ARRAY_SIZE(mana_eth_stats) + num_queues * 4; +} + +static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + u8 *p = data; + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) { + memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + sprintf(p, "rx_%d_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_%d_bytes", i); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + sprintf(p, "tx_%d_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_%d_bytes", i); + p += ETH_GSTRING_LEN; + } +} + +static void mana_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *e_stats, u64 *data) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + void *eth_stats = &apc->eth_stats; + struct mana_stats *stats; + unsigned int start; + u64 packets, bytes; + int q, i = 0; + + if (!apc->port_is_up) + return; + + for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++) + data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset); + + for (q = 0; q < num_queues; q++) { + stats = &apc->rxqs[q]->stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + data[i++] = packets; + data[i++] = bytes; + } + + for (q = 0; q < num_queues; q++) { + stats = &apc->tx_qp[q].txq.stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + data[i++] = packets; + data[i++] = bytes; + } +} + +static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd, + u32 *rules) +{ + struct mana_port_context *apc = netdev_priv(ndev); 
+ + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = apc->num_queues; + return 0; + } + + return -EOPNOTSUPP; +} + +static u32 mana_get_rxfh_key_size(struct net_device *ndev) +{ + return MANA_HASH_KEY_SIZE; +} + +static u32 mana_rss_indir_size(struct net_device *ndev) +{ + return MANA_INDIRECT_TABLE_SIZE; +} + +static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + + if (indir) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + indir[i] = apc->indir_table[i]; + } + + if (key) + memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE); + + return 0; +} + +static int mana_set_rxfh(struct net_device *ndev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mana_port_context *apc = netdev_priv(ndev); + bool update_hash = false, update_table = false; + u32 save_table[MANA_INDIRECT_TABLE_SIZE]; + u8 save_key[MANA_HASH_KEY_SIZE]; + int i, err; + + if (!apc->port_is_up) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + if (indir[i] >= apc->num_queues) + return -EINVAL; + + update_table = true; + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + save_table[i] = apc->indir_table[i]; + apc->indir_table[i] = indir[i]; + } + } + + if (key) { + update_hash = true; + memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE); + memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE); + } + + err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table); + + if (err) { /* recover to original values */ + if (update_table) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + apc->indir_table[i] = save_table[i]; + } + + if (update_hash) + memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE); + + mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table); + } + + return err; +} + +static void mana_get_channels(struct net_device *ndev, + struct ethtool_channels *channel) +{ + struct mana_port_context *apc = netdev_priv(ndev); + + channel->max_combined = apc->max_queues; + channel->combined_count = apc->num_queues; +} + +static int mana_set_channels(struct net_device *ndev, + struct ethtool_channels *channels) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int new_count = channels->combined_count; + unsigned int old_count = apc->num_queues; + int err, err2; + + if (!apc->port_is_up) + return -EOPNOTSUPP; + + err = mana_detach(ndev, false); + if (err) { + netdev_err(ndev, "mana_detach failed: %d\n", err); + return err; + } + + apc->num_queues = new_count; + err = mana_attach(ndev); + if (!err) + return 0; + + netdev_err(ndev, "mana_attach failed: %d\n", err); + + /* Try to roll it back to the old configuration. 
*/ + apc->num_queues = old_count; + err2 = mana_attach(ndev); + if (err2) + netdev_err(ndev, "mana re-attach failed: %d\n", err2); + + return err; +} + +const struct ethtool_ops mana_ethtool_ops = { + .get_ethtool_stats = mana_get_ethtool_stats, + .get_sset_count = mana_get_sset_count, + .get_strings = mana_get_strings, + .get_rxnfc = mana_get_rxnfc, + .get_rxfh_key_size = mana_get_rxfh_key_size, + .get_rxfh_indir_size = mana_rss_indir_size, + .get_rxfh = mana_get_rxfh, + .set_rxfh = mana_set_rxfh, + .get_channels = mana_get_channels, + .set_channels = mana_set_channels, +}; diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c new file mode 100644 index 000000000000..da255da62176 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/mm.h> + +#include "shm_channel.h" + +#define PAGE_FRAME_L48_WIDTH_BYTES 6 +#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8) +#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF +#define PAGE_FRAME_H4_WIDTH_BITS 4 +#define VECTOR_MASK 0xFFFF +#define SHMEM_VF_RESET_STATE ((u32)-1) + +#define SMC_MSG_TYPE_ESTABLISH_HWC 1 +#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0 + +#define SMC_MSG_TYPE_DESTROY_HWC 2 +#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0 + +#define SMC_MSG_DIRECTION_REQUEST 0 +#define SMC_MSG_DIRECTION_RESPONSE 1 + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +/* Shared memory channel protocol header + * + * msg_type: set on request and response; response matches request. + * msg_version: newer PF writes back older response (matching request) + * older PF acts on latest version known and sets that version in result + * (less than request). + * direction: 0 for request, VF->PF; 1 for response, PF->VF. + * status: 0 on request, + * operation result on response (success = 0, failure = 1 or greater). + * reset_vf: If set on either establish or destroy request, indicates perform + * FLR before/after the operation. + * owner_is_pf: 1 indicates PF owned, 0 indicates VF owned. + */ +union smc_proto_hdr { + u32 as_uint32; + + struct { + u8 msg_type : 3; + u8 msg_version : 3; + u8 reserved_1 : 1; + u8 direction : 1; + + u8 status; + + u8 reserved_2; + + u8 reset_vf : 1; + u8 reserved_3 : 6; + u8 owner_is_pf : 1; + }; +}; /* HW DATA */ + +#define SMC_APERTURE_BITS 256 +#define SMC_BASIC_UNIT (sizeof(u32)) +#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8)) +#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1) + +static int mana_smc_poll_register(void __iomem *base, bool reset) +{ + void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT; + u32 last_dword; + int i; + + /* Poll the hardware for the ownership bit. This should be pretty fast, + * but let's do it in a loop just in case the hardware or the PF + * driver are temporarily busy. + */ + for (i = 0; i < 20 * 1000; i++) { + last_dword = readl(ptr); + + /* shmem reads as 0xFFFFFFFF in the reset case */ + if (reset && last_dword == SHMEM_VF_RESET_STATE) + return 0; + + /* If bit_31 is set, the PF currently owns the SMC. 
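The smc_proto_hdr union above packs the whole request/response into a single 32-bit register: msg_type/msg_version/direction in the first byte, status in the second, reset_vf and the PF-ownership bit in the last. A small sketch of that layout done with explicit shifts instead of bitfields, assuming the little-endian bitfield order the driver relies on:

#include <stdint.h>
#include <stdio.h>

/* bit positions within the 32-bit header word (little-endian layout assumed) */
#define SMC_MSG_TYPE(x)     ((uint32_t)((x) & 0x7))
#define SMC_MSG_VERSION(x)  (((uint32_t)((x) & 0x7)) << 3)
#define SMC_DIRECTION(x)    (((uint32_t)((x) & 0x1)) << 7)
#define SMC_STATUS(x)       (((uint32_t)((x) & 0xff)) << 8)
#define SMC_RESET_VF(x)     (((uint32_t)((x) & 0x1)) << 24)
#define SMC_OWNER_IS_PF     (1u << 31)

int main(void)
{
	/* ESTABLISH_HWC request, version 0, no VF reset */
	uint32_t hdr = SMC_MSG_TYPE(1) | SMC_MSG_VERSION(0) |
		       SMC_DIRECTION(0) | SMC_RESET_VF(0);

	printf("request header = 0x%08x\n", (unsigned int)hdr);   /* 0x00000001 */
	printf("PF owns SMC?   %d\n", !!(hdr & SMC_OWNER_IS_PF)); /* 0: VF owns */
	return 0;
}

This is the same bit 31 that mana_smc_poll_register() spins on; the all-ones SHMEM_VF_RESET_STATE value is treated as a special case before the ownership check.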
*/ + if (!(last_dword & BIT(31))) + return 0; + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type, + u32 msg_version, bool reset_vf) +{ + void __iomem *base = sc->base; + union smc_proto_hdr hdr; + int err; + + /* Wait for PF to respond. */ + err = mana_smc_poll_register(base, reset_vf); + if (err) + return err; + + hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT); + + if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE) + return 0; + + /* Validate protocol fields from the PF driver */ + if (hdr.msg_type != msg_type || hdr.msg_version > msg_version || + hdr.direction != SMC_MSG_DIRECTION_RESPONSE) { + dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n", + hdr.as_uint32, msg_type, msg_version); + return -EPROTO; + } + + /* Validate the operation result */ + if (hdr.status != 0) { + dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status); + return -EPROTO; + } + + return 0; +} + +void mana_smc_init(struct shm_channel *sc, struct device *dev, + void __iomem *base) +{ + sc->dev = dev; + sc->base = base; +} + +int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, + u64 cq_addr, u64 rq_addr, u64 sq_addr, + u32 eq_msix_index) +{ + union smc_proto_hdr *hdr; + u16 all_addr_h4bits = 0; + u16 frame_addr_seq = 0; + u64 frame_addr = 0; + u8 shm_buf[32]; + u64 *shmem; + u32 *dword; + u8 *ptr; + int err; + int i; + + /* Ensure VF already has possession of shared memory */ + err = mana_smc_poll_register(sc->base, false); + if (err) { + dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err); + return err; + } + + if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) || + !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr)) + return -EINVAL; + + if ((eq_msix_index & VECTOR_MASK) != eq_msix_index) + return -EINVAL; + + /* Scheme for packing four addresses and extra info into 256 bits. + * + * Addresses must be page frame aligned, so only frame address bits + * are transferred. + * + * 52-bit frame addresses are split into the lower 48 bits and upper + * 4 bits. Lower 48 bits of 4 address are written sequentially from + * the start of the 256-bit shared memory region followed by 16 bits + * containing the upper 4 bits of the 4 addresses in sequence. + * + * A 16 bit EQ vector number fills out the next-to-last 32-bit dword. + * + * The final 32-bit dword is used for protocol control information as + * defined in smc_proto_hdr. 
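The packing scheme described in the comment above fits four 52-bit page frame numbers into 256 bits by writing each PFN's low 48 bits in sequence and collecting the four high nibbles into one u16. A quick userspace check of that arithmetic, using made-up page-aligned addresses and a 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define L48_MASK 0x0000FFFFFFFFFFFFull
#define L48_BITS 48
#define H4_BITS  4

int main(void)
{
	/* hypothetical page-aligned queue addresses; the second one has a
	 * PFN wider than 48 bits on purpose */
	uint64_t addrs[4] = { 0x0000000001234000ull, 0x1000000000002000ull,
			      0x0000003456789000ull, 0x0000000004567000ull };
	uint16_t all_h4 = 0;
	int seq = 0;

	for (int i = 0; i < 4; i++) {
		uint64_t pfn = addrs[i] >> 12;		/* PHYS_PFN()           */
		uint64_t low48 = pfn & L48_MASK;	/* written to shmem     */
		uint64_t high4 = pfn >> L48_BITS;	/* collected separately */

		all_h4 |= high4 << (seq++ * H4_BITS);
		printf("addr %d: low48=0x%012llx high=0x%llx\n", i,
		       (unsigned long long)low48, (unsigned long long)high4);
	}
	printf("packed high nibbles = 0x%04x\n", all_h4);	/* 0x0010 */
	return 0;
}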
+ */ + + memset(shm_buf, 0, sizeof(shm_buf)); + ptr = shm_buf; + + /* EQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(eq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* CQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(cq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* RQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(rq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* SQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(sq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* High 4 bits of the four frame addresses */ + *((u16 *)ptr) = all_addr_h4bits; + ptr += sizeof(u16); + + /* EQ MSIX vector number */ + *((u16 *)ptr) = (u16)eq_msix_index; + ptr += sizeof(u16); + + /* 32-bit protocol header in final dword */ + *((u32 *)ptr) = 0; + + hdr = (union smc_proto_hdr *)ptr; + hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC; + hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION; + hdr->direction = SMC_MSG_DIRECTION_REQUEST; + hdr->reset_vf = reset_vf; + + /* Write 256-message buffer to shared memory (final 32-bit write + * triggers HW to set possession bit to PF). + */ + dword = (u32 *)shm_buf; + for (i = 0; i < SMC_APERTURE_DWORDS; i++) + writel(*dword++, sc->base + i * SMC_BASIC_UNIT); + + /* Read shmem response (polling for VF possession) and validate. + * For setup, waiting for response on shared memory is not strictly + * necessary, since wait occurs later for results to appear in EQE's. + */ + err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC, + SMC_MSG_TYPE_ESTABLISH_HWC_VERSION, + reset_vf); + if (err) { + dev_err(sc->dev, "Error when setting up HWC: %d\n", err); + return err; + } + + return 0; +} + +int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf) +{ + union smc_proto_hdr hdr = {}; + int err; + + /* Ensure already has possession of shared memory */ + err = mana_smc_poll_register(sc->base, false); + if (err) { + dev_err(sc->dev, "Timeout when tearing down HWC\n"); + return err; + } + + /* Set up protocol header for HWC destroy message */ + hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC; + hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION; + hdr.direction = SMC_MSG_DIRECTION_REQUEST; + hdr.reset_vf = reset_vf; + + /* Write message in high 32 bits of 256-bit shared memory, causing HW + * to set possession bit to PF. + */ + writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT); + + /* Read shmem response (polling for VF possession) and validate. + * For teardown, waiting for response is required to ensure hardware + * invalidates MST entries before software frees memory. 
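mana_smc_setup_hwc() above writes the eight 32-bit words of the message in order precisely because the last dword carries the protocol header with the ownership bit, so the final writel() is what hands the channel to the PF; the teardown path only has to write that single header dword. A toy model of the aperture layout (plain stores stand in for the writel() calls and the payload values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define SMC_BASIC_UNIT      4				/* sizeof(u32)   */
#define SMC_APERTURE_DWORDS 8				/* 256 bits / 32 */
#define SMC_LAST_DWORD      (SMC_APERTURE_DWORDS - 1)

/* toy stand-in for the VF-mapped shared memory */
static uint32_t aperture[SMC_APERTURE_DWORDS];

int main(void)
{
	uint32_t msg[SMC_APERTURE_DWORDS] = {
		0x11111111, 0x22222222, 0x33333333, 0x44444444,
		0x55555555, 0x66666666, 0x77777777,
		0x00000001,	/* protocol header, must land last */
	};

	/* setup: dwords 0..6 are payload; storing dword 7 (the header with
	 * the ownership bit) is what hands the aperture to the PF */
	for (int i = 0; i < SMC_APERTURE_DWORDS; i++)
		aperture[i] = msg[i];

	/* teardown only ever needs the header dword at byte offset 28 */
	printf("header offset = %d bytes, value = 0x%08x\n",
	       SMC_LAST_DWORD * SMC_BASIC_UNIT,
	       (unsigned int)aperture[SMC_LAST_DWORD]);
	return 0;
}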
+ */ + err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC, + SMC_MSG_TYPE_DESTROY_HWC_VERSION, + reset_vf); + if (err) { + dev_err(sc->dev, "Error when tearing down HWC: %d\n", err); + return err; + } + + return 0; +} diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h new file mode 100644 index 000000000000..5199b41497ff --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _SHM_CHANNEL_H +#define _SHM_CHANNEL_H + +struct shm_channel { + struct device *dev; + void __iomem *base; +}; + +void mana_smc_init(struct shm_channel *sc, struct device *dev, + void __iomem *base); + +int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, + u64 cq_addr, u64 rq_addr, u64 sq_addr, + u32 eq_msix_index); + +int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf); + +#endif /* _SHM_CHANNEL_H */ diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 49fd843c4c8a..b85733942053 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -485,7 +485,6 @@ static int moxart_mac_probe(struct platform_device *pdev) ndev->base_addr = res->start; priv->base = devm_ioremap_resource(p_dev, res); if (IS_ERR(priv->base)) { - dev_err(p_dev, "devm_ioremap_resource failed\n"); ret = PTR_ERR(priv->base); goto init_fail; } diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 05cb040c2677..2d3157e4d081 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -11,7 +11,7 @@ config NET_VENDOR_MICROSEMI if NET_VENDOR_MICROSEMI -# Users should depend on NET_SWITCHDEV, HAS_IOMEM +# Users should depend on NET_SWITCHDEV, HAS_IOMEM, BRIDGE config MSCC_OCELOT_SWITCH_LIB select NET_DEVLINK select REGMAP_MMIO @@ -24,6 +24,7 @@ config MSCC_OCELOT_SWITCH_LIB config MSCC_OCELOT_SWITCH tristate "Ocelot switch driver" + depends on BRIDGE || BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM depends on OF_NET diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 46e5c9136bac..0c4283319d7f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -6,6 +6,7 @@ */ #include <linux/dsa/ocelot.h> #include <linux/if_bridge.h> +#include <linux/ptp_classify.h> #include <soc/mscc/ocelot_vcap.h> #include "ocelot.h" #include "ocelot_vcap.h" @@ -484,7 +485,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of - * reset */ + * reset + */ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed), DEV_CLOCK_CFG); @@ -529,22 +531,92 @@ void ocelot_port_disable(struct ocelot *ocelot, int port) } EXPORT_SYMBOL(ocelot_port_disable); -void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, - struct sk_buff *clone) +static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, + struct sk_buff *clone) { struct ocelot_port *ocelot_port = ocelot->ports[port]; spin_lock(&ocelot_port->ts_id_lock); skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - /* Store timestamp ID in cb[0] of sk_buff */ - clone->cb[0] = ocelot_port->ts_id; + /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ + OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; 
ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4; skb_queue_tail(&ocelot_port->tx_skbs, clone); spin_unlock(&ocelot_port->ts_id_lock); } -EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb); + +u32 ocelot_ptp_rew_op(struct sk_buff *skb) +{ + struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; + u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd; + u32 rew_op = 0; + + if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) { + rew_op = ptp_cmd; + rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3; + } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { + rew_op = ptp_cmd; + } + + return rew_op; +} +EXPORT_SYMBOL(ocelot_ptp_rew_op); + +static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb) +{ + struct ptp_header *hdr; + unsigned int ptp_class; + u8 msgtype, twostep; + + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + return false; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + return false; + + msgtype = ptp_get_msgtype(hdr, ptp_class); + twostep = hdr->flag_field[0] & 0x2; + + if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) + return true; + + return false; +} + +int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, + struct sk_buff *skb, + struct sk_buff **clone) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u8 ptp_cmd = ocelot_port->ptp_cmd; + + /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ + if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { + if (ocelot_ptp_is_onestep_sync(skb)) { + OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; + return 0; + } + + /* Fall back to two-step timestamping */ + ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + } + + if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *clone = skb_clone_sk(skb); + if (!(*clone)) + return -ENOMEM; + + ocelot_port_add_txtstamp_skb(ocelot, port, *clone); + OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_port_txtstamp_request); static void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts) @@ -603,7 +675,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) spin_lock_irqsave(&port->tx_skbs.lock, flags); skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { - if (skb->cb[0] != id) + if (OCELOT_SKB_CB(skb)->ts_id != id) continue; __skb_unlink(skb, &port->tx_skbs); skb_match = skb; @@ -687,7 +759,7 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) { struct skb_shared_hwtstamps *shhwtstamps; - u64 tod_in_ns, full_ts_in_ns, cpuq; + u64 tod_in_ns, full_ts_in_ns; u64 timestamp, src_port, len; u32 xfh[OCELOT_TAG_LEN / 4]; struct net_device *dev; @@ -704,7 +776,6 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) ocelot_xfh_get_src_port(xfh, &src_port); ocelot_xfh_get_len(xfh, &len); ocelot_xfh_get_rew_val(xfh, ×tamp); - ocelot_xfh_get_cpuq(xfh, &cpuq); if (WARN_ON(src_port >= ocelot->num_phys_ports)) return -EINVAL; @@ -767,17 +838,11 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) /* Everything we see on an interface that is in the HW bridge * has already been forwarded. 
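ocelot_ptp_rew_op() above builds the injection rewrite opcode from two pieces: the PTP command itself and, for two-step timestamping, the timestamp ID of the clone shifted into bits 5:3. A standalone sketch of that encoding (the IFH_REW_OP_* numeric values below are placeholders for the demo, not taken from the header):

#include <stdint.h>
#include <stdio.h>

#define IFH_REW_OP_TWO_STEP_PTP 0x2	/* placeholder value */
#define IFH_REW_OP_ORIGIN_PTP   0x3	/* placeholder value */

static uint32_t ptp_rew_op(uint8_t ptp_cmd, int have_clone, uint8_t ts_id)
{
	if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && have_clone)
		return ptp_cmd | (uint32_t)ts_id << 3;	/* ID in bits 5:3 */
	if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP)
		return ptp_cmd;				/* one-step: no ID */
	return 0;					/* no timestamping */
}

int main(void)
{
	/* ts_id cycles 0..3, so two bits are enough for the encoded ID */
	for (uint8_t id = 0; id < 4; id++)
		printf("two-step, ts_id=%u -> rew_op=0x%x\n", id,
		       (unsigned int)ptp_rew_op(IFH_REW_OP_TWO_STEP_PTP, 1, id));
	printf("one-step sync        -> rew_op=0x%x\n",
	       (unsigned int)ptp_rew_op(IFH_REW_OP_ORIGIN_PTP, 0, 0));
	return 0;
}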
*/ - if (ocelot->bridge_mask & BIT(src_port)) + if (ocelot->ports[src_port]->bridge) skb->offload_fwd_mark = 1; skb->protocol = eth_type_trans(skb, dev); -#if IS_ENABLED(CONFIG_BRIDGE_MRP) - if (skb->protocol == cpu_to_be16(ETH_P_MRP) && - cpuq & BIT(OCELOT_MRP_CPUQ)) - skb->offload_fwd_mark = 0; -#endif - *nskb = skb; return 0; @@ -1190,6 +1255,26 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, return mask; } +static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, + struct net_device *bridge) +{ + u32 mask = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->stp_state == BR_STATE_FORWARDING && + ocelot_port->bridge == bridge) + mask |= BIT(port); + } + + return mask; +} + static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) { u32 mask = 0; @@ -1239,10 +1324,12 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) */ mask = GENMASK(ocelot->num_phys_ports - 1, 0); mask &= ~cpu_fwd_mask; - } else if (ocelot->bridge_fwd_mask & BIT(port)) { + } else if (ocelot_port->bridge) { + struct net_device *bridge = ocelot_port->bridge; struct net_device *bond = ocelot_port->bond; - mask = ocelot->bridge_fwd_mask & ~BIT(port); + mask = ocelot_get_bridge_fwd_mask(ocelot, bridge); + mask &= ~BIT(port); if (bond) { mask &= ~ocelot_get_bond_mask(ocelot, bond, false); @@ -1263,29 +1350,16 @@ EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - u32 port_cfg; + u32 learn_ena = 0; - if (!(BIT(port) & ocelot->bridge_mask)) - return; + ocelot_port->stp_state = state; - port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port); + if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) && + ocelot_port->learn_ena) + learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA; - switch (state) { - case BR_STATE_FORWARDING: - ocelot->bridge_fwd_mask |= BIT(port); - fallthrough; - case BR_STATE_LEARNING: - if (ocelot_port->learn_ena) - port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; - break; - - default: - port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA; - ocelot->bridge_fwd_mask &= ~BIT(port); - break; - } - - ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port); + ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA, + ANA_PORT_PORT_CFG, port); ocelot_apply_bridge_fwd_mask(ocelot); } @@ -1512,43 +1586,28 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_port_mdb_del); -int ocelot_port_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge) +void ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge) { - if (!ocelot->bridge_mask) { - ocelot->hw_bridge_dev = bridge; - } else { - if (ocelot->hw_bridge_dev != bridge) - /* This is adding the port to a second bridge, this is - * unsupported */ - return -ENODEV; - } + struct ocelot_port *ocelot_port = ocelot->ports[port]; - ocelot->bridge_mask |= BIT(port); + ocelot_port->bridge = bridge; - return 0; + ocelot_apply_bridge_fwd_mask(ocelot); } EXPORT_SYMBOL(ocelot_port_bridge_join); -int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, - struct net_device *bridge) +void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_vlan pvid = {0}, native_vlan = {0}; - int ret; - 
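ocelot_get_bridge_fwd_mask() plus the ~BIT(port) in ocelot_apply_bridge_fwd_mask() above amount to: forward to every other port that is in the same bridge and in the forwarding STP state. A rough userspace model of that mask computation, with the bridge net_device pointer replaced by a numeric ID for the demo:

#include <stdint.h>
#include <stdio.h>

enum { STP_BLOCKING, STP_LEARNING, STP_FORWARDING };

struct port {
	int bridge_id;		/* 0 = not bridged; stands in for the      */
	int stp_state;		/* struct net_device *bridge pointer above */
};

static uint32_t bridge_fwd_mask(const struct port *ports, int nports,
				int self, int bridge_id)
{
	uint32_t mask = 0;

	for (int p = 0; p < nports; p++)
		if (ports[p].stp_state == STP_FORWARDING &&
		    ports[p].bridge_id == bridge_id)
			mask |= 1u << p;

	return mask & ~(1u << self);	/* never forward back to the source */
}

int main(void)
{
	/* 4-port example: ports 0, 1, 3 in bridge 1; port 3 still learning */
	struct port ports[4] = {
		{1, STP_FORWARDING}, {1, STP_FORWARDING},
		{0, STP_FORWARDING}, {1, STP_LEARNING},
	};

	printf("fwd mask for port 0: 0x%x\n", bridge_fwd_mask(ports, 4, 0, 1));
	return 0;
}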
ocelot->bridge_mask &= ~BIT(port); - - if (!ocelot->bridge_mask) - ocelot->hw_bridge_dev = NULL; - - ret = ocelot_port_vlan_filtering(ocelot, port, false); - if (ret) - return ret; + ocelot_port->bridge = NULL; ocelot_port_set_pvid(ocelot, port, pvid); ocelot_port_set_native_vlan(ocelot, port, native_vlan); - - return 0; + ocelot_apply_bridge_fwd_mask(ocelot); } EXPORT_SYMBOL(ocelot_port_bridge_leave); @@ -2051,6 +2110,9 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); } + + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE); + /* Allow broadcast and unknown L2 multicast to the CPU. */ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index a41b458b1b3e..8b843d3c9189 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -220,6 +220,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, "Last action must be GOTO"); return -EOPNOTSUPP; } + if (a->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } filter->action.police_ena = true; rate = a->police.rate_bytes_ps; filter->action.pol.rate = div_u64(rate, 1000) * 8; diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c index 683da320bfd8..08b481a93460 100644 --- a/drivers/net/ethernet/mscc/ocelot_mrp.c +++ b/drivers/net/ethernet/mscc/ocelot_mrp.c @@ -1,9 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Microsemi Ocelot Switch driver * - * This contains glue logic between the switchdev driver operations and the - * mscc_ocelot_switch_lib. 
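As a side note to the ocelot_flower policer hunk above, the hardware rate is programmed in kbit/s, which is why rate_bytes_ps is divided by 1000 before being multiplied by 8. A quick check of that conversion with a made-up 125 MB/s request:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bytes_ps = 125000000ull;			/* ~1 Gbit/s */
	uint32_t rate_kbps = (uint32_t)(rate_bytes_ps / 1000) * 8;

	printf("%llu bytes/s -> %u kbit/s\n",
	       (unsigned long long)rate_bytes_ps, rate_kbps);	/* 1000000 */
	return 0;
}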
- * * Copyright (c) 2017, 2019 Microsemi Corporation * Copyright 2020-2021 NXP Semiconductors */ @@ -15,13 +12,34 @@ #include "ocelot.h" #include "ocelot_vcap.h" -static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port) +static const u8 mrp_test_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x01 }; +static const u8 mrp_control_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x02 }; + +static int ocelot_mrp_find_partner_port(struct ocelot *ocelot, + struct ocelot_port *p) +{ + int i; + + for (i = 0; i < ocelot->num_phys_ports; ++i) { + struct ocelot_port *ocelot_port = ocelot->ports[i]; + + if (!ocelot_port || p == ocelot_port) + continue; + + if (ocelot_port->mrp_ring_id == p->mrp_ring_id) + return i; + } + + return -1; +} + +static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int id) { struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *filter; block_vcap_is2 = &ocelot->block[VCAP_IS2]; - filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port, + filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, id, false); if (!filter) return 0; @@ -29,6 +47,87 @@ static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port) return ocelot_vcap_filter_del(ocelot, filter); } +static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port, + int dst_port) +{ + const u8 mrp_test_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct ocelot_vcap_filter *filter; + int err; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + filter->prio = 1; + filter->id.cookie = src_port; + filter->id.tc_offload = false; + filter->block_id = VCAP_IS2; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->ingress_port_mask = BIT(src_port); + ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); + ether_addr_copy(filter->key.etype.dmac.mask, mrp_test_mask); + filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; + filter->action.port_mask = BIT(dst_port); + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} + +static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port, + int prio, unsigned long cookie) +{ + const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + struct ocelot_vcap_filter *filter; + int err; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + filter->prio = prio; + filter->id.cookie = cookie; + filter->id.tc_offload = false; + filter->block_id = VCAP_IS2; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->ingress_port_mask = BIT(port); + /* Here is possible to use control or test dmac because the mask + * doesn't cover the LSB + */ + ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); + ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask); + filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + filter->action.port_mask = 0x0; + filter->action.cpu_copy_ena = true; + filter->action.cpu_qu_num = OCELOT_MRP_CPUQ; + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} + +static void ocelot_mrp_save_mac(struct ocelot *ocelot, + struct ocelot_port *port) +{ + ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac, + port->pvid_vlan.vid, ENTRYTYPE_LOCKED); + ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac, + port->pvid_vlan.vid, ENTRYTYPE_LOCKED); +} + +static void ocelot_mrp_del_mac(struct ocelot *ocelot, + struct ocelot_port *port) +{ + 
ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid); + ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid); +} + int ocelot_mrp_add(struct ocelot *ocelot, int port, const struct switchdev_obj_mrp *mrp) { @@ -45,18 +144,7 @@ int ocelot_mrp_add(struct ocelot *ocelot, int port, if (mrp->p_port != dev && mrp->s_port != dev) return 0; - if (ocelot->mrp_ring_id != 0 && - ocelot->mrp_s_port && - ocelot->mrp_p_port) - return -EINVAL; - - if (mrp->p_port == dev) - ocelot->mrp_p_port = dev; - - if (mrp->s_port == dev) - ocelot->mrp_s_port = dev; - - ocelot->mrp_ring_id = mrp->ring_id; + ocelot_port->mrp_ring_id = mrp->ring_id; return 0; } @@ -66,33 +154,14 @@ int ocelot_mrp_del(struct ocelot *ocelot, int port, const struct switchdev_obj_mrp *mrp) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_port_private *priv; - struct net_device *dev; if (!ocelot_port) return -EOPNOTSUPP; - priv = container_of(ocelot_port, struct ocelot_port_private, port); - dev = priv->dev; - - if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - if (ocelot->mrp_ring_id == 0 && - !ocelot->mrp_s_port && - !ocelot->mrp_p_port) - return -EINVAL; - - if (ocelot_mrp_del_vcap(ocelot, priv->chip_port)) - return -EINVAL; - - if (ocelot->mrp_p_port == dev) - ocelot->mrp_p_port = NULL; - - if (ocelot->mrp_s_port == dev) - ocelot->mrp_s_port = NULL; - - ocelot->mrp_ring_id = 0; + ocelot_port->mrp_ring_id = 0; return 0; } @@ -102,49 +171,39 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_vcap_filter *filter; - struct ocelot_port_private *priv; - struct net_device *dev; + int dst_port; int err; if (!ocelot_port) return -EOPNOTSUPP; - priv = container_of(ocelot_port, struct ocelot_port_private, port); - dev = priv->dev; - - if (ocelot->mrp_ring_id != mrp->ring_id) - return -EINVAL; - - if (!mrp->sw_backup) + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup) return -EOPNOTSUPP; - if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); - if (!filter) - return -ENOMEM; + ocelot_mrp_save_mac(ocelot, ocelot_port); - filter->key_type = OCELOT_VCAP_KEY_ETYPE; - filter->prio = 1; - filter->id.cookie = priv->chip_port; - filter->id.tc_offload = false; - filter->block_id = VCAP_IS2; - filter->type = OCELOT_VCAP_FILTER_OFFLOAD; - filter->ingress_port_mask = BIT(priv->chip_port); - *(__be16 *)filter->key.etype.etype.value = htons(ETH_P_MRP); - *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); - filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; - filter->action.port_mask = 0x0; - filter->action.cpu_copy_ena = true; - filter->action.cpu_qu_num = OCELOT_MRP_CPUQ; + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC) + return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port); - err = ocelot_vcap_filter_add(ocelot, filter, NULL); + dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port); + if (dst_port == -1) + return -EINVAL; + + err = ocelot_mrp_redirect_add_vcap(ocelot, port, dst_port); if (err) - kfree(filter); + return err; - return err; + err = ocelot_mrp_copy_add_vcap(ocelot, port, 2, + port + ocelot->num_phys_ports); + if (err) { + ocelot_mrp_del_vcap(ocelot, port); + return err; + } + + return 0; } EXPORT_SYMBOL(ocelot_mrp_add_ring_role); @@ -152,24 +211,32 @@ int 
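The ocelot_mrp_copy_add_vcap() helper added above deliberately uses a DMAC mask whose last byte is zero, so a single VCAP entry keyed on either MRP multicast address matches both the test (…:01) and the control (…:02) destination. A small standalone check of that masking:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t mrp_test_dmac[6]    = {0x01, 0x15, 0x4e, 0x00, 0x00, 0x01};
static const uint8_t mrp_control_dmac[6] = {0x01, 0x15, 0x4e, 0x00, 0x00, 0x02};
static const uint8_t mrp_mask[6]         = {0xff, 0xff, 0xff, 0xff, 0xff, 0x00};

int main(void)
{
	uint8_t a[6], b[6];

	for (int i = 0; i < 6; i++) {
		a[i] = mrp_test_dmac[i] & mrp_mask[i];
		b[i] = mrp_control_dmac[i] & mrp_mask[i];
	}
	/* with the last byte masked out, one entry written with either DMAC
	 * catches MRP_Test frames and the other MRP control PDUs alike */
	printf("masked keys %s\n", memcmp(a, b, 6) ? "differ" : "match");
	return 0;
}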
ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_port_private *priv; - struct net_device *dev; + int i; if (!ocelot_port) return -EOPNOTSUPP; - priv = container_of(ocelot_port, struct ocelot_port_private, port); - dev = priv->dev; - - if (ocelot->mrp_ring_id != mrp->ring_id) - return -EINVAL; - - if (!mrp->sw_backup) + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup) return -EOPNOTSUPP; - if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - return ocelot_mrp_del_vcap(ocelot, priv->chip_port); + ocelot_mrp_del_vcap(ocelot, port); + ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports); + + for (i = 0; i < ocelot->num_phys_ports; ++i) { + ocelot_port = ocelot->ports[i]; + + if (!ocelot_port) + continue; + + if (ocelot_port->mrp_ring_id != 0) + goto out; + } + + ocelot_mrp_del_mac(ocelot, ocelot->ports[port]); +out: + return 0; } EXPORT_SYMBOL(ocelot_mrp_del_ring_role); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 12cb6867a2d0..aad33d22c33f 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -251,6 +251,12 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, return -EEXIST; } + if (action->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; pol.burst = action->police.burst; @@ -501,21 +507,17 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) /* Check if timestamping is needed */ if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - rew_op = ocelot_port->ptp_cmd; + struct sk_buff *clone = NULL; - if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { - struct sk_buff *clone; - - clone = skb_clone_sk(skb); - if (!clone) { - kfree_skb(skb); - return NETDEV_TX_OK; - } + if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } - ocelot_port_add_txtstamp_skb(ocelot, port, clone); + if (clone) + OCELOT_SKB_CB(skb)->clone = clone; - rew_op |= clone->cb[0] << 3; - } + rew_op = ocelot_ptp_rew_op(skb); } ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); @@ -1111,77 +1113,213 @@ static int ocelot_port_obj_del(struct net_device *dev, return ret; } -static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge) +static void ocelot_inherit_brport_flags(struct ocelot *ocelot, int port, + struct net_device *brport_dev) +{ + struct switchdev_brport_flags flags = {0}; + int flag; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + + for_each_set_bit(flag, &flags.mask, 32) + if (br_port_flag_is_set(brport_dev, BIT(flag))) + flags.val |= BIT(flag); + + ocelot_port_bridge_flags(ocelot, port, flags); +} + +static void ocelot_clear_brport_flags(struct ocelot *ocelot, int port) { struct switchdev_brport_flags flags; - int err; flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; - flags.val = flags.mask; + flags.val = flags.mask & ~BR_LEARNING; + + ocelot_port_bridge_flags(ocelot, port, flags); +} - err = ocelot_port_bridge_join(ocelot, port, bridge); +static int ocelot_switchdev_sync(struct ocelot *ocelot, int port, + struct net_device *brport_dev, + struct 
net_device *bridge_dev, + struct netlink_ext_ack *extack) +{ + clock_t ageing_time; + u8 stp_state; + int err; + + ocelot_inherit_brport_flags(ocelot, port, brport_dev); + + stp_state = br_port_get_stp_state(brport_dev); + ocelot_bridge_stp_state_set(ocelot, port, stp_state); + + err = ocelot_port_vlan_filtering(ocelot, port, + br_vlan_enabled(bridge_dev)); if (err) return err; - ocelot_port_bridge_flags(ocelot, port, flags); + ageing_time = br_get_ageing_time(bridge_dev); + ocelot_port_attr_ageing_set(ocelot, port, ageing_time); + + err = br_mdb_replay(bridge_dev, brport_dev, + &ocelot_switchdev_blocking_nb, extack); + if (err && err != -EOPNOTSUPP) + return err; + + err = br_fdb_replay(bridge_dev, brport_dev, &ocelot_switchdev_nb); + if (err) + return err; + + err = br_vlan_replay(bridge_dev, brport_dev, + &ocelot_switchdev_blocking_nb, extack); + if (err && err != -EOPNOTSUPP) + return err; + + return 0; +} + +static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port) +{ + int err; + + err = ocelot_port_vlan_filtering(ocelot, port, false); + if (err) + return err; + + ocelot_clear_brport_flags(ocelot, port); + + ocelot_bridge_stp_state_set(ocelot, port, BR_STATE_FORWARDING); + + return 0; +} + +static int ocelot_netdevice_bridge_join(struct net_device *dev, + struct net_device *brport_dev, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + int err; + + ocelot_port_bridge_join(ocelot, port, bridge); + + err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack); + if (err) + goto err_switchdev_sync; return 0; + +err_switchdev_sync: + ocelot_port_bridge_leave(ocelot, port, bridge); + return err; } -static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port, +static int ocelot_netdevice_bridge_leave(struct net_device *dev, + struct net_device *brport_dev, struct net_device *bridge) { - struct switchdev_brport_flags flags; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; int err; - flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; - flags.val = flags.mask & ~BR_LEARNING; + err = ocelot_switchdev_unsync(ocelot, port); + if (err) + return err; - err = ocelot_port_bridge_leave(ocelot, port, bridge); + ocelot_port_bridge_leave(ocelot, port, bridge); - ocelot_port_bridge_flags(ocelot, port, flags); + return 0; +} + +static int ocelot_netdevice_lag_join(struct net_device *dev, + struct net_device *bond, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + struct net_device *bridge_dev; + int port = priv->chip_port; + int err; + err = ocelot_port_lag_join(ocelot, port, bond, info); + if (err == -EOPNOTSUPP) { + NL_SET_ERR_MSG_MOD(extack, "Offloading not supported"); + return 0; + } + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return 0; + + err = ocelot_netdevice_bridge_join(dev, bond, bridge_dev, extack); + if (err) + goto err_bridge_join; + + return 0; + +err_bridge_join: + ocelot_port_lag_leave(ocelot, port, bond); return err; } -static int ocelot_netdevice_changeupper(struct 
net_device *dev, - struct netdev_notifier_changeupper_info *info) +static int ocelot_netdevice_lag_leave(struct net_device *dev, + struct net_device *bond) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; + struct net_device *bridge_dev; int port = priv->chip_port; + + ocelot_port_lag_leave(ocelot, port, bond); + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return 0; + + return ocelot_netdevice_bridge_leave(dev, bond, bridge_dev); +} + +static int ocelot_netdevice_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *extack; int err = 0; + extack = netdev_notifier_info_to_extack(&info->info); + if (netif_is_bridge_master(info->upper_dev)) { - if (info->linking) { - err = ocelot_netdevice_bridge_join(ocelot, port, - info->upper_dev); - } else { - err = ocelot_netdevice_bridge_leave(ocelot, port, + if (info->linking) + err = ocelot_netdevice_bridge_join(dev, dev, + info->upper_dev, + extack); + else + err = ocelot_netdevice_bridge_leave(dev, dev, info->upper_dev); - } } if (netif_is_lag_master(info->upper_dev)) { - if (info->linking) { - err = ocelot_port_lag_join(ocelot, port, - info->upper_dev, - info->upper_info); - if (err == -EOPNOTSUPP) { - NL_SET_ERR_MSG_MOD(info->info.extack, - "Offloading not supported"); - err = 0; - } - } else { - ocelot_port_lag_leave(ocelot, port, - info->upper_dev); - } + if (info->linking) + err = ocelot_netdevice_lag_join(dev, info->upper_dev, + info->upper_info, extack); + else + ocelot_netdevice_lag_leave(dev, info->upper_dev); } return notifier_from_errno(err); } +/* Treat CHANGEUPPER events on an offloaded LAG as individual CHANGEUPPER + * events for the lower physical ports of the LAG. + * If the LAG upper isn't offloaded, ignore its CHANGEUPPER events. + * In case the LAG joined a bridge, notify that we are offloading it and can do + * forwarding in hardware towards it. 
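Looking back at ocelot_inherit_brport_flags() in this file, the idea is simply to walk every bit of the offloadable flag mask and mirror the bridge port's current setting into the switch. A userspace approximation of that for_each_set_bit() loop (the BR_* bit positions and the stub port_flag_is_set() are invented for the example; the kernel's constants differ):

#include <stdint.h>
#include <stdio.h>

#define BR_LEARNING    (1u << 0)	/* demo values only */
#define BR_FLOOD       (1u << 1)
#define BR_MCAST_FLOOD (1u << 2)
#define BR_BCAST_FLOOD (1u << 3)

/* stand-in for br_port_flag_is_set(): pretend learning is off, flooding on */
static int port_flag_is_set(uint32_t flag)
{
	return flag != BR_LEARNING;
}

int main(void)
{
	uint32_t mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	uint32_t val = 0;

	/* open-coded for_each_set_bit(): query each offloadable flag and
	 * mirror the bridge's current setting into the switch port */
	for (int bit = 0; bit < 32; bit++)
		if ((mask & (1u << bit)) && port_flag_is_set(1u << bit))
			val |= 1u << bit;

	printf("inherited brport flags: mask=0x%x val=0x%x\n", mask, val);
	return 0;
}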
+ */ static int ocelot_netdevice_lag_changeupper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) @@ -1191,6 +1329,12 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, int err = NOTIFY_DONE; netdev_for_each_lower_dev(dev, lower, iter) { + struct ocelot_port_private *priv = netdev_priv(lower); + struct ocelot_port *ocelot_port = &priv->port; + + if (ocelot_port->bond != dev) + return NOTIFY_OK; + err = ocelot_netdevice_changeupper(lower, info); if (err) return notifier_from_errno(err); diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index a33ab315cc6b..87ad2137ba06 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -4,6 +4,8 @@ * Copyright (c) 2017 Microsemi Corporation * Copyright 2020 NXP */ +#include <linux/time64.h> + #include <soc/mscc/ocelot_ptp.h> #include <soc/mscc/ocelot_sys.h> #include <soc/mscc/ocelot.h> diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 37a232911395..7945393a0655 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -761,6 +761,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix, vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE, etype.value, etype.mask); } + break; } default: break; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 8f2f091bce89..9cfcd5500462 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -6657,7 +6657,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) /** * s2io_set_link - Set the LInk status - * @work: work struct containing a pointer to device private structue + * @work: work struct containing a pointer to device private structure * Description: Sets the link status for the adapter */ diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 9c86f4f9cd42..63f65193dd49 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -454,49 +454,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); #define vxge_debug_ll_config(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) #else -#define vxge_debug_ll_config(level, fmt, ...) +#define vxge_debug_ll_config(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) #define vxge_debug_init(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) #else -#define vxge_debug_init(level, fmt, ...) +#define vxge_debug_init(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) #define vxge_debug_tx(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) #else -#define vxge_debug_tx(level, fmt, ...) +#define vxge_debug_tx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) #define vxge_debug_rx(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) #else -#define vxge_debug_rx(level, fmt, ...) +#define vxge_debug_rx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) #define vxge_debug_mem(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) #else -#define vxge_debug_mem(level, fmt, ...) +#define vxge_debug_mem(level, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #endif #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) #define vxge_debug_entryexit(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) #else -#define vxge_debug_entryexit(level, fmt, ...) +#define vxge_debug_entryexit(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) #define vxge_debug_intr(level, fmt, ...) \ vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) #else -#define vxge_debug_intr(level, fmt, ...) +#define vxge_debug_intr(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\ diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index bdbf0726145e..605a1617b195 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -419,8 +419,8 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port, return data; alink = repr->app_priv; for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { - data = nfp_pr_et(data, "q%u_no_wait", i); - data = nfp_pr_et(data, "q%u_delayed", i); + ethtool_sprintf(&data, "q%u_no_wait", i); + ethtool_sprintf(&data, "q%u_delayed", i); } return data; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 56833a41f3d2..31377923ea3d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -47,6 +47,7 @@ struct nfp_app; #define NFP_FL_FEATS_PRE_TUN_RULES BIT(6) #define NFP_FL_FEATS_IPV6_TUN BIT(7) #define NFP_FL_FEATS_VLAN_QINQ BIT(8) +#define NFP_FL_FEATS_QOS_PPS BIT(9) #define NFP_FL_FEATS_HOST_ACK BIT(31) #define NFP_FL_ENABLE_FLOW_MERGE BIT(0) @@ -61,7 +62,8 @@ struct nfp_app; NFP_FL_FEATS_FLOW_MOD | \ NFP_FL_FEATS_PRE_TUN_RULES | \ NFP_FL_FEATS_IPV6_TUN | \ - NFP_FL_FEATS_VLAN_QINQ) + NFP_FL_FEATS_VLAN_QINQ | \ + NFP_FL_FEATS_QOS_PPS) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index d4ce8f9ef3cc..784c6dbf8bc4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -10,19 +10,26 @@ #include "../nfp_port.h" #define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000) +#define NFP_FL_QOS_PPS BIT(15) struct nfp_police_cfg_head { __be32 flags_opts; __be32 port; }; +enum NFP_FL_QOS_TYPES { + NFP_FL_QOS_TYPE_BPS, + NFP_FL_QOS_TYPE_PPS, + NFP_FL_QOS_TYPE_MAX, +}; + /* Police cmsg for configuring a trTCM traffic conditioner (8W/32B) * See RFC 2698 for more details. 
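The vxge change a little further up swaps empty stub macros for no_printk() so that disabled debug statements still get printf-style format checking while compiling to nothing. A minimal sketch of that trick outside the kernel (gcc/clang, since it uses the ##__VA_ARGS__ extension):

#include <stdio.h>

#define DEBUG_ENABLED 0

#if DEBUG_ENABLED
#define dbg(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* compiles to nothing, but the format string is still type-checked */
#define dbg(fmt, ...)					\
	do {						\
		if (0)					\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)
#endif

int main(void)
{
	int queue = 3;

	dbg("queue %d ready\n", queue);	/* -Wformat still sees this */
	printf("debug %s\n", DEBUG_ENABLED ? "on" : "off");
	return 0;
}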
* ---------------------------------------------------------------- * 3 2 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Flag options | + * | Reserved |p| Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Port Ingress | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -38,6 +45,9 @@ struct nfp_police_cfg_head { * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Committed Information Rate | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * Word[0](FLag options): + * [15] p(pps) 1 for pps ,0 for bps + * */ struct nfp_police_config { struct nfp_police_cfg_head head; @@ -62,13 +72,18 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow, struct netlink_ext_ack *extack) { - struct flow_action_entry *action = &flow->rule->action.entries[0]; + struct flow_action_entry *paction = &flow->rule->action.entries[0]; + u32 action_num = flow->rule->action.num_entries; struct nfp_flower_priv *fl_priv = app->priv; + struct flow_action_entry *action = NULL; struct nfp_flower_repr_priv *repr_priv; struct nfp_police_config *config; + u32 netdev_port_id, i; struct nfp_repr *repr; struct sk_buff *skb; - u32 netdev_port_id; + bool pps_support; + u32 bps_num = 0; + u32 pps_num = 0; u32 burst; u64 rate; @@ -78,6 +93,8 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, } repr = netdev_priv(netdev); repr_priv = repr->app_priv; + netdev_port_id = nfp_repr_get_port_id(netdev); + pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS); if (repr_priv->block_shared) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks"); @@ -89,9 +106,18 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } - if (!flow_offload_has_one_action(&flow->rule->action)) { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action"); - return -EOPNOTSUPP; + if (pps_support) { + if (action_num > 2 || action_num == 0) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload only support action number 1 or 2"); + return -EOPNOTSUPP; + } + } else { + if (!flow_offload_has_one_action(&flow->rule->action)) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload requires a single action"); + return -EOPNOTSUPP; + } } if (flow->common.prio != 1) { @@ -99,31 +125,69 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } - if (action->id != FLOW_ACTION_POLICE) { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action"); - return -EOPNOTSUPP; + for (i = 0 ; i < action_num; i++) { + action = paction + i; + if (action->id != FLOW_ACTION_POLICE) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload requires police action"); + return -EOPNOTSUPP; + } + if (action->police.rate_bytes_ps > 0) { + if (bps_num++) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload only support one BPS action"); + return -EOPNOTSUPP; + } + } + if (action->police.rate_pkt_ps > 0) { + if (!pps_support) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: FW does not support PPS action"); + return -EOPNOTSUPP; + } + if (pps_num++) { + NL_SET_ERR_MSG_MOD(extack, + 
"unsupported offload: qos rate limit offload only support one PPS action"); + return -EOPNOTSUPP; + } + } } - rate = action->police.rate_bytes_ps; - burst = action->police.burst; - netdev_port_id = nfp_repr_get_port_id(netdev); - - skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), - NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - config = nfp_flower_cmsg_get_data(skb); - memset(config, 0, sizeof(struct nfp_police_config)); - config->head.port = cpu_to_be32(netdev_port_id); - config->bkt_tkn_p = cpu_to_be32(burst); - config->bkt_tkn_c = cpu_to_be32(burst); - config->pbs = cpu_to_be32(burst); - config->cbs = cpu_to_be32(burst); - config->pir = cpu_to_be32(rate); - config->cir = cpu_to_be32(rate); - nfp_ctrl_tx(repr->app->ctrl, skb); + for (i = 0 ; i < action_num; i++) { + /* Set QoS data for this interface */ + action = paction + i; + if (action->police.rate_bytes_ps > 0) { + rate = action->police.rate_bytes_ps; + burst = action->police.burst; + } else if (action->police.rate_pkt_ps > 0) { + rate = action->police.rate_pkt_ps; + burst = action->police.burst_pkt; + } else { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit is not BPS or PPS"); + continue; + } + if (rate != 0) { + skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), + NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + config = nfp_flower_cmsg_get_data(skb); + memset(config, 0, sizeof(struct nfp_police_config)); + if (action->police.rate_pkt_ps > 0) + config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS); + config->head.port = cpu_to_be32(netdev_port_id); + config->bkt_tkn_p = cpu_to_be32(burst); + config->bkt_tkn_c = cpu_to_be32(burst); + config->pbs = cpu_to_be32(burst); + config->cbs = cpu_to_be32(burst); + config->pir = cpu_to_be32(rate); + config->cir = cpu_to_be32(rate); + nfp_ctrl_tx(repr->app->ctrl, skb); + } + } repr_priv->qos_table.netdev_port_id = netdev_port_id; fl_priv->qos_rate_limiters++; if (fl_priv->qos_rate_limiters == 1) @@ -141,9 +205,10 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_priv *fl_priv = app->priv; struct nfp_flower_repr_priv *repr_priv; struct nfp_police_config *config; + u32 netdev_port_id, i; struct nfp_repr *repr; struct sk_buff *skb; - u32 netdev_port_id; + bool pps_support; if (!nfp_netdev_is_nfp_repr(netdev)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port"); @@ -153,27 +218,38 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev, netdev_port_id = nfp_repr_get_port_id(netdev); repr_priv = repr->app_priv; + pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS); if (!repr_priv->qos_table.netdev_port_id) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist"); return -EOPNOTSUPP; } - skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), - NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - /* Clear all qos associate data for this interface */ memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos)); fl_priv->qos_rate_limiters--; if (!fl_priv->qos_rate_limiters) cancel_delayed_work_sync(&fl_priv->qos_stats_work); - - config = nfp_flower_cmsg_get_data(skb); - memset(config, 0, sizeof(struct nfp_police_config)); - config->head.port = cpu_to_be32(netdev_port_id); - nfp_ctrl_tx(repr->app->ctrl, skb); + for (i = 0 ; i < NFP_FL_QOS_TYPE_MAX; i++) { 
+ if (i == NFP_FL_QOS_TYPE_PPS && !pps_support) + break; + /* 0:bps 1:pps + * Clear QoS data for this interface. + * There is no need to check if a specific QOS_TYPE was + * configured as the firmware handles clearing a QoS entry + * safely, even if it wasn't explicitly added. + */ + skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), + NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + config = nfp_flower_cmsg_get_data(skb); + memset(config, 0, sizeof(struct nfp_police_config)); + if (i == NFP_FL_QOS_TYPE_PPS) + config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS); + config->head.port = cpu_to_be32(netdev_port_id); + nfp_ctrl_tx(repr->app->ctrl, skb); + } return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 76d13af46a7a..3e9baff07100 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -18,7 +18,6 @@ struct netdev_bpf; struct netlink_ext_ack; struct pci_dev; struct sk_buff; -struct sk_buff; struct nfp_app; struct nfp_cpp; struct nfp_pf; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 713ee3041d49..bea978df7713 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) attrs.split = eth_port.is_split; attrs.splittable = !attrs.split; + attrs.lanes = eth_port.port_lanes; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = eth_port.label_port; attrs.phys.split_subport_number = eth_port.label_subport; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 9c9ae33d84ce..1b482446536d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -429,17 +429,6 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } -__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) 
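The nfp_pr_et() helper removed above did exactly what the generic ethtool_sprintf() now does: format into one ETH_GSTRING_LEN-wide slot and advance the cursor, which is why the replacement takes &data. A small userspace model of that helper (the 32-byte slot width mirrors ETH_GSTRING_LEN):

#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* model of ethtool_sprintf(): format into one fixed-width slot and
 * advance the caller's cursor, hence the char ** parameter */
static void ethtool_sprintf_model(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char strings[4][ETH_GSTRING_LEN];
	char *p = &strings[0][0];

	for (unsigned int q = 0; q < 2; q++) {
		ethtool_sprintf_model(&p, "rxq_%u_pkts", q);
		ethtool_sprintf_model(&p, "rxq_%u_bytes", q);
	}
	for (int i = 0; i < 4; i++)
		printf("%s\n", strings[i]);
	return 0;
}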
-{ - va_list args; - - va_start(args, fmt); - vsnprintf(data, ETH_GSTRING_LEN, fmt, args); - va_end(args); - - return data + ETH_GSTRING_LEN; -} - static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); @@ -454,29 +443,29 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) int i; for (i = 0; i < nn->max_r_vecs; i++) { - data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); - data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); - data = nfp_pr_et(data, "rvec_%u_tx_busy", i); + ethtool_sprintf(&data, "rvec_%u_rx_pkts", i); + ethtool_sprintf(&data, "rvec_%u_tx_pkts", i); + ethtool_sprintf(&data, "rvec_%u_tx_busy", i); } - data = nfp_pr_et(data, "hw_rx_csum_ok"); - data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); - data = nfp_pr_et(data, "hw_rx_csum_complete"); - data = nfp_pr_et(data, "hw_rx_csum_err"); - data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); - data = nfp_pr_et(data, "rx_tls_decrypted_packets"); - data = nfp_pr_et(data, "hw_tx_csum"); - data = nfp_pr_et(data, "hw_tx_inner_csum"); - data = nfp_pr_et(data, "tx_gather"); - data = nfp_pr_et(data, "tx_lso"); - data = nfp_pr_et(data, "tx_tls_encrypted_packets"); - data = nfp_pr_et(data, "tx_tls_ooo"); - data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); - - data = nfp_pr_et(data, "hw_tls_no_space"); - data = nfp_pr_et(data, "rx_tls_resync_req_ok"); - data = nfp_pr_et(data, "rx_tls_resync_req_ign"); - data = nfp_pr_et(data, "rx_tls_resync_sent"); + ethtool_sprintf(&data, "hw_rx_csum_ok"); + ethtool_sprintf(&data, "hw_rx_csum_inner_ok"); + ethtool_sprintf(&data, "hw_rx_csum_complete"); + ethtool_sprintf(&data, "hw_rx_csum_err"); + ethtool_sprintf(&data, "rx_replace_buf_alloc_fail"); + ethtool_sprintf(&data, "rx_tls_decrypted_packets"); + ethtool_sprintf(&data, "hw_tx_csum"); + ethtool_sprintf(&data, "hw_tx_inner_csum"); + ethtool_sprintf(&data, "tx_gather"); + ethtool_sprintf(&data, "tx_lso"); + ethtool_sprintf(&data, "tx_tls_encrypted_packets"); + ethtool_sprintf(&data, "tx_tls_ooo"); + ethtool_sprintf(&data, "tx_tls_drop_no_sync_data"); + + ethtool_sprintf(&data, "hw_tls_no_space"); + ethtool_sprintf(&data, "rx_tls_resync_req_ok"); + ethtool_sprintf(&data, "rx_tls_resync_req_ign"); + ethtool_sprintf(&data, "rx_tls_resync_sent"); return data; } @@ -550,19 +539,19 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr) swap_off = repr * NN_ET_SWITCH_STATS_LEN; for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name); + ethtool_sprintf(&data, nfp_net_et_stats[i + swap_off].name); for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name); + ethtool_sprintf(&data, nfp_net_et_stats[i - swap_off].name); for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i].name); + ethtool_sprintf(&data, nfp_net_et_stats[i].name); for (i = 0; i < num_vecs; i++) { - data = nfp_pr_et(data, "rxq_%u_pkts", i); - data = nfp_pr_et(data, "rxq_%u_bytes", i); - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); + ethtool_sprintf(&data, "rxq_%u_pkts", i); + ethtool_sprintf(&data, "rxq_%u_bytes", i); + ethtool_sprintf(&data, "txq_%u_pkts", i); + ethtool_sprintf(&data, "txq_%u_bytes", i); } return data; @@ -610,15 +599,15 @@ static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data) memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN); data += 
ETH_GSTRING_LEN; } else { - data = nfp_pr_et(data, "dev_unknown_stat%u", id); + ethtool_sprintf(&data, "dev_unknown_stat%u", id); } } for (i = 0; i < nn->max_r_vecs; i++) { - data = nfp_pr_et(data, "rxq_%u_pkts", i); - data = nfp_pr_et(data, "rxq_%u_bytes", i); - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); + ethtool_sprintf(&data, "rxq_%u_pkts", i); + ethtool_sprintf(&data, "rxq_%u_bytes", i); + ethtool_sprintf(&data, "txq_%u_pkts", i); + ethtool_sprintf(&data, "txq_%u_bytes", i); } return data; @@ -666,7 +655,7 @@ static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data) return data; for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++) - data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name); + ethtool_sprintf(&data, "mac.%s", nfp_mac_et_stats[i].name); return data; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index d7fd203bb180..ae4da189d955 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -92,8 +92,6 @@ struct nfp_port { extern const struct ethtool_ops nfp_port_ethtool_ops; -__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); - int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index d3cbb4215f5c..64c6842bd452 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1044,7 +1044,8 @@ static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb, if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) { /* This function should never be called when there are no - buffers */ + * buffers + */ netif_stop_queue(ndev); spin_unlock_irq(&pldat->lock); WARN(1, "BUG! 
TX request when no free TX buffers!\n"); @@ -1318,7 +1319,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); /* Allocate a chunk of memory for the DMA ethernet buffers - and descriptors */ + * and descriptors + */ pldat->dma_buff_base_v = dma_alloc_coherent(dev, pldat->dma_buff_size, &dma_handle, @@ -1348,9 +1350,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) __lpc_get_mac(pldat, ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { - const char *macaddr = of_get_mac_address(np); - if (!IS_ERR(macaddr)) - ether_addr_copy(ndev->dev_addr, macaddr); + of_get_mac_address(np, ndev->dev_addr); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); @@ -1365,7 +1365,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) __lpc_mii_mngt_reset(pldat); /* Force default PHY interface setup in chip, this will probably be - changed by the PHY driver */ + * changed by the PHY driver + */ pldat->link = 0; pldat->speed = 100; pldat->duplex = DUPLEX_FULL; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 55cef5b16aa5..a6823c4d355d 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -612,15 +612,6 @@ void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, struct pch_gbe_rx_ring *rx_ring); void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); -u32 pch_ch_control_read(struct pci_dev *pdev); -void pch_ch_control_write(struct pci_dev *pdev, u32 val); -u32 pch_ch_event_read(struct pci_dev *pdev); -void pch_ch_event_write(struct pci_dev *pdev, u32 val); -u32 pch_src_uuid_lo_read(struct pci_dev *pdev); -u32 pch_src_uuid_hi_read(struct pci_dev *pdev); -u64 pch_rx_snap_read(struct pci_dev *pdev); -u64 pch_tx_snap_read(struct pci_dev *pdev); -int pch_set_station_address(u8 *addr, struct pci_dev *pdev); /* pch_gbe_param.c */ void pch_gbe_check_options(struct pch_gbe_adapter *adapter); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 140cee7c459d..334af49e5add 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/ptp_classify.h> +#include <linux/ptp_pch.h> #include <linux/gpio.h> #define DRV_VERSION "1.01" diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile index 8d3c2d3cb10d..4e7642a2d25f 100644 --- a/drivers/net/ethernet/pensando/ionic/Makefile +++ b/drivers/net/ethernet/pensando/ionic/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_IONIC) := ionic.o ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \ ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \ ionic_txrx.o ionic_stats.o ionic_fw.o +ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 084a924431d5..66204106f83e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -20,6 +20,10 @@ struct ionic_lif; #define DEVCMD_TIMEOUT 10 +#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */ +#define NORMAL_PPB 1000000000 /* one billion parts per billion */ +#define SCALED_PPM 
(1000000ull << 16) /* 2^16 million parts per 2^16 million */ + struct ionic_vf { u16 index; u8 macaddr[6]; @@ -64,6 +68,8 @@ struct ionic_admin_ctx { union ionic_adminq_comp comp; }; +int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err); int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); int ionic_set_dma_mask(struct ionic *ionic); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index b0d8499d373b..e4a5416adc80 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -184,6 +184,10 @@ static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs) struct device *dev = ionic->dev; int ret = 0; + if (ionic->lif && + test_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) + return -EBUSY; + if (num_vfs > 0) { ret = pci_enable_sriov(pdev, num_vfs); if (ret) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index fb2b5bf179d7..1dfe962e22e0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -14,18 +14,23 @@ static void ionic_watchdog_cb(struct timer_list *t) { struct ionic *ionic = from_timer(ionic, t, watchdog_timer); + struct ionic_lif *lif = ionic->lif; int hb; mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); - if (!ionic->lif) + if (!lif) return; hb = ionic_heartbeat_check(ionic); + dev_dbg(ionic->dev, "%s: hb %d running %d UP %d\n", + __func__, hb, netif_running(lif->netdev), + test_bit(IONIC_LIF_F_UP, lif->state)); - if (hb >= 0) - ionic_link_status_check_request(ionic->lif, CAN_NOT_SLEEP); + if (hb >= 0 && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); } void ionic_init_devinfo(struct ionic *ionic) @@ -74,6 +79,8 @@ int ionic_dev_setup(struct ionic *ionic) idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET; idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET; + idev->hwstamp_regs = &idev->dev_info_regs->hwstamp; + sig = ioread32(&idev->dev_info_regs->signature); if (sig != IONIC_DEV_INFO_SIGNATURE) { dev_err(dev, "Incompatible firmware signature %x", sig); @@ -89,9 +96,17 @@ int ionic_dev_setup(struct ionic *ionic) return -EFAULT; } - idev->last_fw_status = 0xff; timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0); ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ; + + /* set times to ensure the first check will proceed */ + atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ); + idev->last_hb_time = jiffies - 2 * ionic->watchdog_period; + /* init as ready, so no transition if the first check succeeds */ + idev->last_fw_hb = 0; + idev->fw_hb_ready = true; + idev->fw_status_ready = true; + mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); @@ -105,29 +120,38 @@ int ionic_dev_setup(struct ionic *ionic) int ionic_heartbeat_check(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; - unsigned long hb_time; + unsigned long check_time, last_check_time; + bool fw_status_ready, fw_hb_ready; u8 fw_status; - u32 hb; + u32 fw_hb; - /* wait a little more than one second before testing again */ - hb_time = jiffies; - if (time_before(hb_time, (idev->last_hb_time + 
ionic->watchdog_period))) + /* wait a least one second before testing again */ + check_time = jiffies; + last_check_time = atomic_long_read(&idev->last_check_time); +do_check_time: + if (time_before(check_time, last_check_time + HZ)) return 0; + if (!atomic_long_try_cmpxchg_relaxed(&idev->last_check_time, + &last_check_time, check_time)) { + /* if called concurrently, only the first should proceed. */ + dev_dbg(ionic->dev, "%s: do_check_time again\n", __func__); + goto do_check_time; + } /* firmware is useful only if the running bit is set and * fw_status != 0xff (bad PCI read) */ fw_status = ioread8(&idev->dev_info_regs->fw_status); - if (fw_status != 0xff) - fw_status &= IONIC_FW_STS_F_RUNNING; /* use only the run bit */ + fw_status_ready = (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING); /* is this a transition? */ - if (fw_status != idev->last_fw_status && - idev->last_fw_status != 0xff) { + if (fw_status_ready != idev->fw_status_ready) { struct ionic_lif *lif = ionic->lif; bool trigger = false; - if (!fw_status || fw_status == 0xff) { + idev->fw_status_ready = fw_status_ready; + + if (!fw_status_ready) { dev_info(ionic->dev, "FW stopped %u\n", fw_status); if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) trigger = true; @@ -141,44 +165,47 @@ int ionic_heartbeat_check(struct ionic *ionic) struct ionic_deferred_work *work; work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - dev_err(ionic->dev, "LIF reset trigger dropped\n"); - } else { + if (work) { work->type = IONIC_DW_TYPE_LIF_RESET; - if (fw_status & IONIC_FW_STS_F_RUNNING && - fw_status != 0xff) - work->fw_status = 1; + work->fw_status = fw_status_ready; ionic_lif_deferred_enqueue(&lif->deferred, work); } } } - idev->last_fw_status = fw_status; - if (!fw_status || fw_status == 0xff) + if (!fw_status_ready) return -ENXIO; - /* early FW has no heartbeat, else FW will return non-zero */ - hb = ioread32(&idev->dev_info_regs->fw_heartbeat); - if (!hb) + /* wait at least one watchdog period since the last heartbeat */ + last_check_time = idev->last_hb_time; + if (time_before(check_time, last_check_time + ionic->watchdog_period)) return 0; - /* are we stalled? 
*/ - if (hb == idev->last_hb) { - /* only complain once for each stall seen */ - if (idev->last_hb_time != 1) { - dev_info(ionic->dev, "FW heartbeat stalled at %d\n", - idev->last_hb); - idev->last_hb_time = 1; - } + fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat); + fw_hb_ready = fw_hb != idev->last_fw_hb; - return -ENXIO; + /* early FW version had no heartbeat, so fake it */ + if (!fw_hb_ready && !fw_hb) + fw_hb_ready = true; + + dev_dbg(ionic->dev, "%s: fw_hb %u last_fw_hb %u ready %u\n", + __func__, fw_hb, idev->last_fw_hb, fw_hb_ready); + + idev->last_fw_hb = fw_hb; + + /* log a transition */ + if (fw_hb_ready != idev->fw_hb_ready) { + idev->fw_hb_ready = fw_hb_ready; + if (!fw_hb_ready) + dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb); + else + dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb); } - if (idev->last_hb_time == 1) - dev_info(ionic->dev, "FW heartbeat restored at %d\n", hb); + if (!fw_hb_ready) + return -ENXIO; - idev->last_hb = hb; - idev->last_hb_time = hb_time; + idev->last_hb_time = check_time; return 0; } @@ -585,9 +612,9 @@ void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, void *cb_arg) { - struct device *dev = q->lif->ionic->dev; struct ionic_desc_info *desc_info; struct ionic_lif *lif = q->lif; + struct device *dev = q->dev; desc_info = &q->info[q->head_idx]; desc_info->cb = cb; @@ -629,7 +656,7 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, /* stop index must be for a descriptor that is not yet completed */ if (unlikely(!ionic_q_is_posted(q, stop_index))) - dev_err(q->lif->ionic->dev, + dev_err(q->dev, "ionic stop is not posted %s stop %u tail %u head %u\n", q->name, stop_index, q->tail_idx, q->head_idx); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 690768ff0143..c25cf9b744c5 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -4,6 +4,7 @@ #ifndef _IONIC_DEV_H_ #define _IONIC_DEV_H_ +#include <linux/atomic.h> #include <linux/mutex.h> #include <linux/workqueue.h> @@ -58,6 +59,7 @@ static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64); static_assert(sizeof(struct ionic_dev_getattr_comp) == 16); static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64); static_assert(sizeof(struct ionic_dev_setattr_comp) == 16); +static_assert(sizeof(struct ionic_lif_setphc_cmd) == 64); /* Port commands */ static_assert(sizeof(struct ionic_port_identify_cmd) == 64); @@ -134,10 +136,13 @@ struct ionic_devinfo { struct ionic_dev { union ionic_dev_info_regs __iomem *dev_info_regs; union ionic_dev_cmd_regs __iomem *dev_cmd_regs; + struct ionic_hwstamp_regs __iomem *hwstamp_regs; + atomic_long_t last_check_time; unsigned long last_hb_time; - u32 last_hb; - u8 last_fw_status; + u32 last_fw_hb; + bool fw_hb_ready; + bool fw_status_ready; u64 __iomem *db_pages; dma_addr_t phy_db_pages; @@ -170,11 +175,20 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q, struct ionic_desc_info *desc_info, struct ionic_cq_info *cq_info, void *cb_arg); -struct ionic_page_info { +#define IONIC_PAGE_SIZE PAGE_SIZE +#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2) +#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ + __GFP_COMP | __GFP_MEMALLOC) + +struct ionic_buf_info { struct page *page; dma_addr_t dma_addr; + u32 page_offset; + u32 len; }; +#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1) + 
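
The reworked ionic_heartbeat_check() above limits itself to one pass per second and uses atomic_long_try_cmpxchg() so that only the first of several concurrent callers actually performs the check. A minimal userspace sketch of that guard pattern follows, using C11 atomics and an illustrative one-second interval; the names and time source are not the driver's.

/*
 * Sketch of the "only one caller per interval" guard used by
 * ionic_heartbeat_check() above; names and interval are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static atomic_long last_check_time;

static bool heartbeat_check_allowed(void)
{
	long now = (long)time(NULL);
	long last = atomic_load(&last_check_time);

	do {
		if (now < last + 1)	/* wait at least one second */
			return false;
		/* on failure, 'last' is reloaded and the interval re-checked */
	} while (!atomic_compare_exchange_weak(&last_check_time, &last, now));

	return true;	/* this caller won the swap and may run the check */
}
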
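The NORMAL_PPB and SCALED_PPM constants added to ionic.h earlier in this patch are the usual scale factors for PTP frequency adjustment, where the requested offset arrives as parts per million with a 16-bit binary fraction (scaled_ppm). The PHC code itself lives in ionic_phc.c, which is not shown in these hunks, so the arithmetic below is an assumed sketch of how such a constant is typically applied to a clock multiplier; the base multiplier value is illustrative.

/*
 * Assumed usage sketch: scale a cycle-to-ns multiplier by
 * (1 + scaled_ppm / SCALED_PPM).  Not taken from ionic_phc.c.
 */
#include <stdint.h>
#include <stdio.h>

#define SCALED_PPM (1000000ull << 16)	/* 2^16 million parts per 2^16 million */

static uint32_t adjust_mult(uint32_t base_mult, long scaled_ppm)
{
	int neg = scaled_ppm < 0;
	uint64_t adj;

	if (neg)
		scaled_ppm = -scaled_ppm;

	/* rounded fractional adjustment: base_mult * scaled_ppm / SCALED_PPM */
	adj = ((uint64_t)base_mult * (uint64_t)scaled_ppm + SCALED_PPM / 2) / SCALED_PPM;

	return neg ? base_mult - (uint32_t)adj : base_mult + (uint32_t)adj;
}

int main(void)
{
	/* +1 ppm is 65536 scaled_ppm: a 1e9 multiplier moves by exactly 1000 */
	printf("%u\n", adjust_mult(1000000000u, 65536));	/* prints 1000001000 */
	return 0;
}
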
struct ionic_desc_info { union { void *desc; @@ -187,8 +201,9 @@ struct ionic_desc_info { struct ionic_txq_sg_desc *txq_sg_desc; struct ionic_rxq_sg_desc *rxq_sgl_desc; }; - unsigned int npages; - struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1]; + unsigned int bytes; + unsigned int nbufs; + struct ionic_buf_info bufs[IONIC_MAX_FRAGS]; ionic_desc_cb cb; void *cb_arg; }; @@ -199,10 +214,13 @@ struct ionic_queue { struct device *dev; struct ionic_lif *lif; struct ionic_desc_info *info; + u64 dbval; u16 head_idx; u16 tail_idx; unsigned int index; unsigned int num_descs; + unsigned int max_sg_elems; + u64 features; u64 dbell_count; u64 stop; u64 wake; @@ -211,7 +229,6 @@ struct ionic_queue { unsigned int type; unsigned int hw_index; unsigned int hw_type; - u64 dbval; union { void *base; struct ionic_txq_desc *txq; @@ -229,7 +246,7 @@ struct ionic_queue { unsigned int sg_desc_size; unsigned int pid; char name[IONIC_QUEUE_NAME_MAX_SZ]; -}; +} ____cacheline_aligned_in_smp; #define IONIC_INTR_INDEX_NOT_ASSIGNED -1 #define IONIC_INTR_NAME_MAX_SZ 32 @@ -256,7 +273,7 @@ struct ionic_cq { u64 compl_count; void *base; dma_addr_t base_pa; -}; +} ____cacheline_aligned_in_smp; struct ionic; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 0832bedcb3b4..6583be570e45 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -29,11 +29,9 @@ static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) static void ionic_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { - struct ionic_lif *lif; + struct ionic_lif *lif = netdev_priv(netdev); u32 i; - lif = netdev_priv(netdev); - memset(buf, 0, stats->n_stats * sizeof(*buf)); for (i = 0; i < ionic_num_stats_grps; i++) ionic_stats_groups[i].get_values(lif, &buf); @@ -209,6 +207,14 @@ static int ionic_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseER_Full); break; + case IONIC_XCVR_PID_SFP_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; + case IONIC_XCVR_PID_SFP_1000BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + break; case IONIC_XCVR_PID_UNKNOWN: /* This means there's no module plugged in */ break; @@ -264,12 +270,10 @@ static int ionic_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; struct ionic *ionic = lif->ionic; - struct ionic_dev *idev; int err = 0; - idev = &lif->ionic->idev; - /* set autoneg */ if (ks->base.autoneg != idev->port_info->config.an_enable) { mutex_lock(&ionic->dev_cmd_lock); @@ -845,6 +849,98 @@ static int ionic_get_module_eeprom(struct net_device *netdev, return 0; } +static int ionic_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + __le64 mask; + + if (!lif->phc || !lif->phc->ptp) + return ethtool_op_get_ts_info(netdev, info); + + info->phc_index = ptp_clock_index(lif->phc->ptp); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + /* tx modes */ + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + 
+ mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_SYNC)); + if (ionic->ident.lif.eth.hwstamp_tx_modes & mask) + info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC); + + mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_P2P)); + if (ionic->ident.lif.eth.hwstamp_tx_modes & mask) + info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_P2P); + + /* rx filters */ + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + mask = cpu_to_le64(IONIC_PKT_CLS_NTP_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_NTP_ALL); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_SYNC); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_SYNC); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_DREQ); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_ALL); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + static int ionic_nway_reset(struct net_device *netdev) { struct ionic_lif *lif = netdev_priv(netdev); @@ -902,6 +998,7 @@ static const struct ethtool_ops ionic_ethtool_ops = { .set_pauseparam = ionic_set_pauseparam, .get_fecparam = ionic_get_fecparam, .set_fecparam = ionic_set_fecparam, + .get_ts_info = ionic_get_ts_info, .nway_reset = ionic_nway_reset, }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 31ccfcdc2b0a..0478b48d9895 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -34,6 +34,7 @@ enum ionic_cmd_opcode { IONIC_CMD_LIF_RESET = 22, IONIC_CMD_LIF_GETATTR = 23, IONIC_CMD_LIF_SETATTR = 24, + IONIC_CMD_LIF_SETPHC = 25, IONIC_CMD_RX_MODE_SET = 30, IONIC_CMD_RX_FILTER_ADD = 31, @@ -269,6 +270,9 @@ union 
ionic_drv_identity { * value in usecs to device units using: * device units = usecs * mult / div * @eq_count: Number of shared event queues + * @hwstamp_mask: Bitmask for subtraction of hardware tick values. + * @hwstamp_mult: Hardware tick to nanosecond multiplier. + * @hwstamp_shift: Hardware tick to nanosecond divisor (power of two). */ union ionic_dev_identity { struct { @@ -283,6 +287,9 @@ union ionic_dev_identity { __le32 intr_coal_mult; __le32 intr_coal_div; __le32 eq_count; + __le64 hwstamp_mask; + __le32 hwstamp_mult; + __le32 hwstamp_shift; }; __le32 words[478]; }; @@ -320,7 +327,7 @@ struct ionic_lif_identify_comp { /** * enum ionic_lif_capability - LIF capabilities * @IONIC_LIF_CAP_ETH: LIF supports Ethernet - * @IONIC_LIF_CAP_RDMA: LIF support RDMA + * @IONIC_LIF_CAP_RDMA: LIF supports RDMA */ enum ionic_lif_capability { IONIC_LIF_CAP_ETH = BIT(0), @@ -346,6 +353,68 @@ enum ionic_logical_qtype { }; /** + * enum ionic_q_feature - Common Features for most queue types + * + * Common features use bits 0-15. Per-queue-type features use higher bits. + * + * @IONIC_QIDENT_F_CQ: Queue has completion ring + * @IONIC_QIDENT_F_SG: Queue has scatter/gather ring + * @IONIC_QIDENT_F_EQ: Queue can use event queue + * @IONIC_QIDENT_F_CMB: Queue is in cmb bar + * @IONIC_Q_F_2X_DESC: Double main descriptor size + * @IONIC_Q_F_2X_CQ_DESC: Double cq descriptor size + * @IONIC_Q_F_2X_SG_DESC: Double sg descriptor size + * @IONIC_Q_F_4X_DESC: Quadruple main descriptor size + * @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size + * @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size + */ +enum ionic_q_feature { + IONIC_QIDENT_F_CQ = BIT_ULL(0), + IONIC_QIDENT_F_SG = BIT_ULL(1), + IONIC_QIDENT_F_EQ = BIT_ULL(2), + IONIC_QIDENT_F_CMB = BIT_ULL(3), + IONIC_Q_F_2X_DESC = BIT_ULL(4), + IONIC_Q_F_2X_CQ_DESC = BIT_ULL(5), + IONIC_Q_F_2X_SG_DESC = BIT_ULL(6), + IONIC_Q_F_4X_DESC = BIT_ULL(7), + IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8), + IONIC_Q_F_4X_SG_DESC = BIT_ULL(9), +}; + +/** + * enum ionic_rxq_feature - RXQ-specific Features + * + * Per-queue-type features use bits 16 and higher. + * + * @IONIC_RXQ_F_HWSTAMP: Queue supports Hardware Timestamping + */ +enum ionic_rxq_feature { + IONIC_RXQ_F_HWSTAMP = BIT_ULL(16), +}; + +/** + * enum ionic_txq_feature - TXQ-specific Features + * + * Per-queue-type features use bits 16 and higher. + * + * @IONIC_TXQ_F_HWSTAMP: Queue supports Hardware Timestamping + */ +enum ionic_txq_feature { + IONIC_TXQ_F_HWSTAMP = BIT(16), +}; + +/** + * struct ionic_hwstamp_bits - Hardware timestamp decoding bits + * @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value + * @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset + * from the base cq descriptor. 
+ */ +enum ionic_hwstamp_bits { + IONIC_HWSTAMP_INVALID = ~0ull, + IONIC_HWSTAMP_CQ_NEGOFFSET = 8, +}; + +/** * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type * @qtype: Hardware Queue Type * @qid_count: Number of Queue IDs of the logical type @@ -404,7 +473,9 @@ union ionic_lif_config { * @max_ucast_filters: Number of perfect unicast addresses supported * @max_mcast_filters: Number of perfect multicast addresses supported * @min_frame_size: Minimum size of frames to be sent - * @max_frame_size: Maximim size of frames to be sent + * @max_frame_size: Maximum size of frames to be sent + * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode) + * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class * @config: LIF config struct with features, mtu, mac, q counts * * @rdma: RDMA identify structure @@ -438,7 +509,10 @@ union ionic_lif_identity { __le16 rss_ind_tbl_sz; __le32 min_frame_size; __le32 max_frame_size; - u8 rsvd2[106]; + u8 rsvd2[2]; + __le64 hwstamp_tx_modes; + __le64 hwstamp_rx_filters; + u8 rsvd3[88]; union ionic_lif_config config; } __packed eth; @@ -529,7 +603,7 @@ struct ionic_q_identify_comp { * union ionic_q_identity - queue identity information * @version: Queue type version that can be used with FW * @supported: Bitfield of queue versions, first bit = ver 0 - * @features: Queue features + * @features: Queue features (enum ionic_q_feature, etc) * @desc_sz: Descriptor size * @comp_sz: Completion descriptor size * @sg_desc_sz: Scatter/Gather descriptor size @@ -541,10 +615,6 @@ union ionic_q_identity { u8 version; u8 supported; u8 rsvd[6]; -#define IONIC_QIDENT_F_CQ 0x01 /* queue has completion ring */ -#define IONIC_QIDENT_F_SG 0x02 /* queue has scatter/gather ring */ -#define IONIC_QIDENT_F_EQ 0x04 /* queue can use event queue */ -#define IONIC_QIDENT_F_CMB 0x08 /* queue is in cmb bar */ __le64 features; __le16 desc_sz; __le16 comp_sz; @@ -585,6 +655,7 @@ union ionic_q_identity { * @ring_base: Queue ring base address * @cq_ring_base: Completion queue ring base address * @sg_ring_base: Scatter/Gather ring base address + * @features: Mask of queue features to enable, if not in the flags above. */ struct ionic_q_init_cmd { u8 opcode; @@ -608,7 +679,8 @@ struct ionic_q_init_cmd { __le64 ring_base; __le64 cq_ring_base; __le64 sg_ring_base; - u8 rsvd2[20]; + u8 rsvd2[12]; + __le64 features; } __packed; /** @@ -692,7 +764,7 @@ enum ionic_txq_desc_opcode { * checksums are also updated. * * IONIC_TXQ_DESC_OPCODE_TSO: - * Device preforms TCP segmentation offload + * Device performs TCP segmentation offload * (TSO). @hdr_len is the number of bytes * to the end of TCP header (the offset to * the TCP payload). 
@mss is the desired @@ -982,13 +1054,13 @@ struct ionic_rxq_comp { }; enum ionic_pkt_type { - IONIC_PKT_TYPE_NON_IP = 0x000, - IONIC_PKT_TYPE_IPV4 = 0x001, - IONIC_PKT_TYPE_IPV4_TCP = 0x003, - IONIC_PKT_TYPE_IPV4_UDP = 0x005, - IONIC_PKT_TYPE_IPV6 = 0x008, - IONIC_PKT_TYPE_IPV6_TCP = 0x018, - IONIC_PKT_TYPE_IPV6_UDP = 0x028, + IONIC_PKT_TYPE_NON_IP = 0x00, + IONIC_PKT_TYPE_IPV4 = 0x01, + IONIC_PKT_TYPE_IPV4_TCP = 0x03, + IONIC_PKT_TYPE_IPV4_UDP = 0x05, + IONIC_PKT_TYPE_IPV6 = 0x08, + IONIC_PKT_TYPE_IPV6_TCP = 0x18, + IONIC_PKT_TYPE_IPV6_UDP = 0x28, /* below types are only used if encap offloads are enabled on lif */ IONIC_PKT_TYPE_ENCAP_NON_IP = 0x40, IONIC_PKT_TYPE_ENCAP_IPV4 = 0x41, @@ -1019,7 +1091,64 @@ enum ionic_eth_hw_features { IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16), IONIC_ETH_HW_RX_CSUM_GENEVE = BIT(17), IONIC_ETH_HW_TX_CSUM_GENEVE = BIT(18), - IONIC_ETH_HW_TSO_GENEVE = BIT(19) + IONIC_ETH_HW_TSO_GENEVE = BIT(19), + IONIC_ETH_HW_TIMESTAMP = BIT(20), +}; + +/** + * enum ionic_pkt_class - Packet classification mask. + * + * Used with rx steering filter, packets indicated by the mask can be steered + * toward a specific receive queue. + * + * @IONIC_PKT_CLS_NTP_ALL: All NTP packets. + * @IONIC_PKT_CLS_PTP1_SYNC: PTPv1 sync + * @IONIC_PKT_CLS_PTP1_DREQ: PTPv1 delay-request + * @IONIC_PKT_CLS_PTP1_ALL: PTPv1 all packets + * @IONIC_PKT_CLS_PTP2_L4_SYNC: PTPv2-UDP sync + * @IONIC_PKT_CLS_PTP2_L4_DREQ: PTPv2-UDP delay-request + * @IONIC_PKT_CLS_PTP2_L4_ALL: PTPv2-UDP all packets + * @IONIC_PKT_CLS_PTP2_L2_SYNC: PTPv2-ETH sync + * @IONIC_PKT_CLS_PTP2_L2_DREQ: PTPv2-ETH delay-request + * @IONIC_PKT_CLS_PTP2_L2_ALL: PTPv2-ETH all packets + * @IONIC_PKT_CLS_PTP2_SYNC: PTPv2 sync + * @IONIC_PKT_CLS_PTP2_DREQ: PTPv2 delay-request + * @IONIC_PKT_CLS_PTP2_ALL: PTPv2 all packets + * @IONIC_PKT_CLS_PTP_SYNC: PTP sync + * @IONIC_PKT_CLS_PTP_DREQ: PTP delay-request + * @IONIC_PKT_CLS_PTP_ALL: PTP all packets + */ +enum ionic_pkt_class { + IONIC_PKT_CLS_NTP_ALL = BIT(0), + + IONIC_PKT_CLS_PTP1_SYNC = BIT(1), + IONIC_PKT_CLS_PTP1_DREQ = BIT(2), + IONIC_PKT_CLS_PTP1_ALL = BIT(3) | + IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ, + + IONIC_PKT_CLS_PTP2_L4_SYNC = BIT(4), + IONIC_PKT_CLS_PTP2_L4_DREQ = BIT(5), + IONIC_PKT_CLS_PTP2_L4_ALL = BIT(6) | + IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ, + + IONIC_PKT_CLS_PTP2_L2_SYNC = BIT(7), + IONIC_PKT_CLS_PTP2_L2_DREQ = BIT(8), + IONIC_PKT_CLS_PTP2_L2_ALL = BIT(9) | + IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ, + + IONIC_PKT_CLS_PTP2_SYNC = + IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L2_SYNC, + IONIC_PKT_CLS_PTP2_DREQ = + IONIC_PKT_CLS_PTP2_L4_DREQ | IONIC_PKT_CLS_PTP2_L2_DREQ, + IONIC_PKT_CLS_PTP2_ALL = + IONIC_PKT_CLS_PTP2_L4_ALL | IONIC_PKT_CLS_PTP2_L2_ALL, + + IONIC_PKT_CLS_PTP_SYNC = + IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP2_SYNC, + IONIC_PKT_CLS_PTP_DREQ = + IONIC_PKT_CLS_PTP1_DREQ | IONIC_PKT_CLS_PTP2_DREQ, + IONIC_PKT_CLS_PTP_ALL = + IONIC_PKT_CLS_PTP1_ALL | IONIC_PKT_CLS_PTP2_ALL, }; /** @@ -1111,6 +1240,8 @@ enum ionic_xcvr_pid { IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, IONIC_XCVR_PID_SFP_25GBASE_ACC = 71, + IONIC_XCVR_PID_SFP_10GBASE_T = 72, + IONIC_XCVR_PID_SFP_1000BASE_T = 73, }; /** @@ -1327,11 +1458,25 @@ enum ionic_stats_ctl_cmd { }; /** + * enum ionic_txstamp_mode - List of TX Timestamping Modes + * @IONIC_TXSTAMP_OFF: Disable TX hardware timetamping. + * @IONIC_TXSTAMP_ON: Enable local TX hardware timetamping. + * @IONIC_TXSTAMP_ONESTEP_SYNC: Modify TX PTP Sync packets. 
+ * @IONIC_TXSTAMP_ONESTEP_P2P: Modify TX PTP Sync and PDelayResp. + */ +enum ionic_txstamp_mode { + IONIC_TXSTAMP_OFF = 0, + IONIC_TXSTAMP_ON = 1, + IONIC_TXSTAMP_ONESTEP_SYNC = 2, + IONIC_TXSTAMP_ONESTEP_P2P = 3, +}; + +/** * enum ionic_port_attr - List of device attributes * @IONIC_PORT_ATTR_STATE: Port state attribute * @IONIC_PORT_ATTR_SPEED: Port speed attribute * @IONIC_PORT_ATTR_MTU: Port MTU attribute - * @IONIC_PORT_ATTR_AUTONEG: Port autonegotation attribute + * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute * @IONIC_PORT_ATTR_FEC: Port FEC attribute * @IONIC_PORT_ATTR_PAUSE: Port pause attribute * @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute @@ -1568,6 +1713,7 @@ enum ionic_rss_hash_types { * @IONIC_LIF_ATTR_FEATURES: LIF features attribute * @IONIC_LIF_ATTR_RSS: LIF RSS attribute * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute + * @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode */ enum ionic_lif_attr { IONIC_LIF_ATTR_STATE = 0, @@ -1577,6 +1723,7 @@ enum ionic_lif_attr { IONIC_LIF_ATTR_FEATURES = 4, IONIC_LIF_ATTR_RSS = 5, IONIC_LIF_ATTR_STATS_CTRL = 6, + IONIC_LIF_ATTR_TXSTAMP = 7, }; /** @@ -1594,6 +1741,7 @@ enum ionic_lif_attr { * @key: The hash secret key * @addr: Address for the indirection table shared memory * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd) + * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode) */ struct ionic_lif_setattr_cmd { u8 opcode; @@ -1612,6 +1760,7 @@ struct ionic_lif_setattr_cmd { __le64 addr; } rss; u8 stats_ctl; + __le16 txstamp_mode; u8 rsvd[60]; } __packed; }; @@ -1656,6 +1805,7 @@ struct ionic_lif_getattr_cmd { * @mtu: Mtu * @mac: Station mac * @features: Features (enum ionic_eth_hw_features) + * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode) * @color: Color bit */ struct ionic_lif_getattr_comp { @@ -1667,11 +1817,35 @@ struct ionic_lif_getattr_comp { __le32 mtu; u8 mac[6]; __le64 features; + __le16 txstamp_mode; u8 rsvd2[11]; } __packed; u8 color; }; +/** + * struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock + * @opcode: Opcode + * @lif_index: LIF index + * @tick: Hardware stamp tick of an instant in time. + * @nsec: Nanosecond stamp of the same instant. + * @frac: Fractional nanoseconds at the same instant. + * @mult: Cycle to nanosecond multiplier. + * @shift: Cycle to nanosecond divisor (power of two). 
+ */ +struct ionic_lif_setphc_cmd { + u8 opcode; + u8 rsvd1; + __le16 lif_index; + u8 rsvd2[4]; + __le64 tick; + __le64 nsec; + __le64 frac; + __le32 mult; + __le32 shift; + u8 rsvd3[24]; +}; + enum ionic_rx_mode { IONIC_RX_MODE_F_UNICAST = BIT(0), IONIC_RX_MODE_F_MULTICAST = BIT(1), @@ -1704,9 +1878,10 @@ struct ionic_rx_mode_set_cmd { typedef struct ionic_admin_comp ionic_rx_mode_set_comp; enum ionic_rx_filter_match_type { - IONIC_RX_FILTER_MATCH_VLAN = 0, - IONIC_RX_FILTER_MATCH_MAC, - IONIC_RX_FILTER_MATCH_MAC_VLAN, + IONIC_RX_FILTER_MATCH_VLAN = 0x0, + IONIC_RX_FILTER_MATCH_MAC = 0x1, + IONIC_RX_FILTER_MATCH_MAC_VLAN = 0x2, + IONIC_RX_FILTER_STEER_PKTCLASS = 0x10, }; /** @@ -1723,6 +1898,7 @@ enum ionic_rx_filter_match_type { * @mac_vlan: MACVLAN filter * @vlan: VLAN ID * @addr: MAC address (network-byte order) + * @pkt_class: Packet classification filter */ struct ionic_rx_filter_add_cmd { u8 opcode; @@ -1741,8 +1917,9 @@ struct ionic_rx_filter_add_cmd { __le16 vlan; u8 addr[6]; } mac_vlan; + __le64 pkt_class; u8 rsvd[54]; - }; + } __packed; }; /** @@ -1951,8 +2128,8 @@ enum ionic_qos_sched_type { * @pfc_cos: Priority-Flow Control class of service * @dwrr_weight: QoS class scheduling weight * @strict_rlmt: Rate limit for strict priority scheduling - * @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP) - * @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP) + * @rw_dot1q_pcp: Rewrite dot1q pcp to value (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to value (valid iff F_RW_IP_DSCP) * @dot1q_pcp: Dot1q pcp value * @ndscp: Number of valid dscp values in the ip_dscp field * @ip_dscp: IP dscp values @@ -2743,6 +2920,16 @@ union ionic_dev_cmd_comp { }; /** + * struct ionic_hwstamp_regs - Hardware current timestamp registers + * @tick_low: Low 32 bits of hardware timestamp + * @tick_high: High 32 bits of hardware timestamp + */ +struct ionic_hwstamp_regs { + u32 tick_low; + u32 tick_high; +}; + +/** * union ionic_dev_info_regs - Device info register format (read-only) * @signature: Signature value of 0x44455649 ('DEVI') * @version: Current version of info @@ -2752,6 +2939,7 @@ union ionic_dev_cmd_comp { * @fw_heartbeat: Firmware heartbeat counter * @serial_num: Serial number * @fw_version: Firmware version + * @hwstamp_regs: Hardware current timestamp registers */ union ionic_dev_info_regs { #define IONIC_DEVINFO_FWVERS_BUFLEN 32 @@ -2766,6 +2954,8 @@ union ionic_dev_info_regs { u32 fw_heartbeat; char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN]; + u8 rsvd_pad1024[948]; + struct ionic_hwstamp_regs hwstamp; }; u32 words[512]; }; @@ -2813,6 +3003,7 @@ union ionic_adminq_cmd { struct ionic_q_control_cmd q_control; struct ionic_lif_setattr_cmd lif_setattr; struct ionic_lif_getattr_cmd lif_getattr; + struct ionic_lif_setphc_cmd lif_setphc; struct ionic_rx_mode_set_cmd rx_mode_set; struct ionic_rx_filter_add_cmd rx_filter_add; struct ionic_rx_filter_del_cmd rx_filter_del; @@ -2829,6 +3020,7 @@ union ionic_adminq_comp { struct ionic_q_init_comp q_init; struct ionic_lif_setattr_comp lif_setattr; struct ionic_lif_getattr_comp lif_getattr; + struct ionic_admin_comp lif_setphc; struct ionic_rx_filter_add_comp rx_filter_add; struct ionic_fw_control_comp fw_control; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 11140915c2da..af3a5368529c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ 
b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -120,23 +120,34 @@ static void ionic_link_status_check(struct ionic_lif *lif) if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return; + /* Don't put carrier back up if we're in a broken state */ + if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) { + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); + return; + } + link_status = le16_to_cpu(lif->info->status.link_status); link_up = link_status == IONIC_PORT_OPER_STATUS_UP; if (link_up) { - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + int err = 0; + + if (netdev->flags & IFF_UP && netif_running(netdev)) { mutex_lock(&lif->queue_lock); - ionic_start_queues(lif); + err = ionic_start_queues(lif); + if (err && err != -EBUSY) { + netdev_err(lif->netdev, + "Failed to start queues: %d\n", err); + set_bit(IONIC_LIF_F_BROKEN, lif->state); + netif_carrier_off(lif->netdev); + } mutex_unlock(&lif->queue_lock); } - if (!netif_carrier_ok(netdev)) { - u32 link_speed; - + if (!err && !netif_carrier_ok(netdev)) { ionic_port_identify(lif->ionic); - link_speed = le32_to_cpu(lif->info->status.link_speed); netdev_info(netdev, "Link up - %d Gbps\n", - link_speed / 1000); + le32_to_cpu(lif->info->status.link_speed) / 1000); netif_carrier_on(netdev); } } else { @@ -145,7 +156,7 @@ static void ionic_link_status_check(struct ionic_lif *lif) netif_carrier_off(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + if (netdev->flags & IFF_UP && netif_running(netdev)) { mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); mutex_unlock(&lif->queue_lock); @@ -382,6 +393,8 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) static void ionic_qcqs_free(struct ionic_lif *lif) { struct device *dev = lif->ionic->dev; + struct ionic_qcq *adminqcq; + unsigned long irqflags; if (lif->notifyqcq) { ionic_qcq_free(lif, lif->notifyqcq); @@ -390,9 +403,14 @@ static void ionic_qcqs_free(struct ionic_lif *lif) } if (lif->adminqcq) { - ionic_qcq_free(lif, lif->adminqcq); - devm_kfree(dev, lif->adminqcq); + spin_lock_irqsave(&lif->adminq_lock, irqflags); + adminqcq = READ_ONCE(lif->adminqcq); lif->adminqcq = NULL; + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + if (adminqcq) { + ionic_qcq_free(lif, adminqcq); + devm_kfree(dev, adminqcq); + } } if (lif->rxqcqs) { @@ -495,6 +513,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, goto err_out; } + new->q.dev = dev; new->flags = flags; new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), @@ -506,6 +525,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, } new->q.type = type; + new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, desc_size, sg_desc_size, pid); @@ -656,20 +676,20 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) err = -ENOMEM; lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, - sizeof(struct ionic_qcq *), GFP_KERNEL); + sizeof(*lif->txqcqs), GFP_KERNEL); if (!lif->txqcqs) goto err_out; lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, - sizeof(struct ionic_qcq *), GFP_KERNEL); + sizeof(*lif->rxqcqs), GFP_KERNEL); if (!lif->rxqcqs) goto err_out; - lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, - sizeof(struct ionic_tx_stats), GFP_KERNEL); + lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1, + sizeof(*lif->txqstats), GFP_KERNEL); if (!lif->txqstats) goto err_out; - lif->rxqstats = devm_kcalloc(dev, 
lif->ionic->nrxqs_per_lif, - sizeof(struct ionic_rx_stats), GFP_KERNEL); + lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1, + sizeof(*lif->rxqstats), GFP_KERNEL); if (!lif->rxqstats) goto err_out; @@ -711,15 +731,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .ring_base = cpu_to_le64(q->base_pa), .cq_ring_base = cpu_to_le64(cq->base_pa), .sg_ring_base = cpu_to_le64(q->sg_base_pa), + .features = cpu_to_le64(q->features), }, }; unsigned int intr_index; int err; - if (qcq->flags & IONIC_QCQ_F_INTR) - intr_index = qcq->intr.index; - else - intr_index = lif->rxqcqs[q->index]->intr.index; + intr_index = qcq->intr.index; + ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); @@ -773,6 +792,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .ring_base = cpu_to_le64(q->base_pa), .cq_ring_base = cpu_to_le64(cq->base_pa), .sg_ring_base = cpu_to_le64(q->sg_base_pa), + .features = cpu_to_le64(q->features), }, }; int err; @@ -810,6 +830,254 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) return 0; } +int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) +{ + unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; + unsigned int txq_i, flags; + struct ionic_qcq *txq; + u64 features; + int err; + + mutex_lock(&lif->queue_lock); + + if (lif->hwstamp_txq) + goto out; + + features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; + + num_desc = IONIC_MIN_TXRX_DESC; + desc_sz = sizeof(struct ionic_txq_desc); + comp_sz = 2 * sizeof(struct ionic_txq_comp); + + if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && + lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1)) + sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); + else + sg_desc_sz = sizeof(struct ionic_txq_sg_desc); + + txq_i = lif->ionic->ntxqs_per_lif; + flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, &txq); + if (err) + goto err_qcq_alloc; + + txq->q.features = features; + + ionic_link_qcq_interrupts(lif->adminqcq, txq); + ionic_debugfs_add_qcq(lif, txq); + + lif->hwstamp_txq = txq; + + if (netif_running(lif->netdev)) { + err = ionic_lif_txq_init(lif, txq); + if (err) + goto err_qcq_init; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + err = ionic_qcq_enable(txq); + if (err) + goto err_qcq_enable; + } + } + +out: + mutex_unlock(&lif->queue_lock); + + return 0; + +err_qcq_enable: + ionic_lif_qcq_deinit(lif, txq); +err_qcq_init: + lif->hwstamp_txq = NULL; + ionic_debugfs_del_qcq(txq); + ionic_qcq_free(lif, txq); + devm_kfree(lif->ionic->dev, txq); +err_qcq_alloc: + mutex_unlock(&lif->queue_lock); + return err; +} + +int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) +{ + unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; + unsigned int rxq_i, flags; + struct ionic_qcq *rxq; + u64 features; + int err; + + mutex_lock(&lif->queue_lock); + + if (lif->hwstamp_rxq) + goto out; + + features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; + + num_desc = IONIC_MIN_TXRX_DESC; + desc_sz = sizeof(struct ionic_rxq_desc); + comp_sz = 2 * sizeof(struct ionic_rxq_comp); + sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); + + rxq_i = lif->ionic->nrxqs_per_lif; + flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; + + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, + num_desc, desc_sz, comp_sz, sg_desc_sz, + lif->kern_pid, 
&rxq); + if (err) + goto err_qcq_alloc; + + rxq->q.features = features; + + ionic_link_qcq_interrupts(lif->adminqcq, rxq); + ionic_debugfs_add_qcq(lif, rxq); + + lif->hwstamp_rxq = rxq; + + if (netif_running(lif->netdev)) { + err = ionic_lif_rxq_init(lif, rxq); + if (err) + goto err_qcq_init; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + ionic_rx_fill(&rxq->q); + err = ionic_qcq_enable(rxq); + if (err) + goto err_qcq_enable; + } + } + +out: + mutex_unlock(&lif->queue_lock); + + return 0; + +err_qcq_enable: + ionic_lif_qcq_deinit(lif, rxq); +err_qcq_init: + lif->hwstamp_rxq = NULL; + ionic_debugfs_del_qcq(rxq); + ionic_qcq_free(lif, rxq); + devm_kfree(lif->ionic->dev, rxq); +err_qcq_alloc: + mutex_unlock(&lif->queue_lock); + return err; +} + +int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all) +{ + struct ionic_queue_params qparam; + + ionic_init_queue_params(lif, &qparam); + + if (rx_all) + qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; + else + qparam.rxq_features = 0; + + /* if we're not running, just set the values and return */ + if (!netif_running(lif->netdev)) { + lif->rxq_features = qparam.rxq_features; + return 0; + } + + return ionic_reconfigure_queues(lif, &qparam); +} + +int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_TXSTAMP, + .txstamp_mode = cpu_to_le16(txstamp_mode), + }, + }; + + return ionic_adminq_post_wait(lif, &ctx); +} + +static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + u32 filter_id; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_rxsteer(lif); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return; + } + + filter_id = f->filter_id; + ionic_rx_filter_free(lif, f); + + spin_unlock_bh(&lif->rx_filters.lock); + + netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id); + + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id); +} + +static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS), + .pkt_class = cpu_to_le64(pkt_class), + }, + }; + u8 qtype; + u32 qid; + int err; + + if (!lif->hwstamp_rxq) + return -EINVAL; + + qtype = lif->hwstamp_rxq->q.type; + ctx.cmd.rx_filter_add.qtype = qtype; + + qid = lif->hwstamp_rxq->q.index; + ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid); + + netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n"); + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; + + return ionic_rx_filter_save(lif, 0, qid, 0, &ctx); +} + +int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) +{ + ionic_lif_del_hwstamp_rxfilt(lif); + + if (!pkt_class) + return 0; + + return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class); +} + static bool 
ionic_notifyq_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { @@ -837,7 +1105,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq, switch (le16_to_cpu(comp->event.ecode)) { case IONIC_EVENT_LINK_CHANGE: - ionic_link_status_check_request(lif, false); + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); break; case IONIC_EVENT_RESET: work = kzalloc(sizeof(*work), GFP_ATOMIC); @@ -875,30 +1143,43 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; struct ionic_lif *lif = napi_to_cq(napi)->lif; struct ionic_dev *idev = &lif->ionic->idev; + unsigned long irqflags; unsigned int flags = 0; + int rx_work = 0; + int tx_work = 0; int n_work = 0; int a_work = 0; int work_done; + int credits; if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED) n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, ionic_notifyq_service, NULL, NULL); + spin_lock_irqsave(&lif->adminq_lock, irqflags); if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) a_work = ionic_cq_service(&lif->adminqcq->cq, budget, ionic_adminq_service, NULL, NULL); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + + if (lif->hwstamp_rxq) + rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget, + ionic_rx_service, NULL, NULL); + + if (lif->hwstamp_txq) + tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget, + ionic_tx_service, NULL, NULL); - work_done = max(n_work, a_work); + work_done = max(max(n_work, a_work), max(rx_work, tx_work)); if (work_done < budget && napi_complete_done(napi, work_done)) { flags |= IONIC_INTR_CRED_UNMASK; - lif->adminqcq->cq.bound_intr->rearm_count++; + intr->rearm_count++; } if (work_done || flags) { flags |= IONIC_INTR_CRED_RESET_COALESCE; - ionic_intr_credits(idev->intr_ctrl, - intr->index, - n_work + a_work, flags); + credits = n_work + a_work + rx_work + tx_work; + ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); } return work_done; @@ -1258,6 +1539,10 @@ static int ionic_set_nic_features(struct ionic_lif *lif, int err; ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); + + if (lif->phc) + ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP); + err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; @@ -1305,6 +1590,8 @@ static int ionic_set_nic_features(struct ionic_lif *lif, dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP) + dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n"); return 0; } @@ -1441,7 +1728,7 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif) */ err = ionic_txrx_init(lif); mutex_unlock(&lif->queue_lock); - ionic_link_status_check_request(lif, true); + ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); return err; @@ -1480,7 +1767,8 @@ static void ionic_tx_timeout_work(struct work_struct *ws) { struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); - netdev_info(lif->netdev, "Tx Timeout recovery\n"); + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; /* if we were stopped before this scheduled job was launched, * don't bother the queues as they are already stopped. 
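
The hwstamp fields documented in the ionic_if.h hunks above (hwstamp_mask, hwstamp_mult and hwstamp_shift in the device identity, plus the tick/nsec/frac/mult/shift members of the SETPHC command) describe a free-running hardware tick counter that is converted to nanoseconds with a multiply-and-shift, the usual cyclecounter form. A hedged sketch of that conversion follows; it ignores the wrap handling that the mask and the periodic IONIC_PHC_UPDATE_NS refresh exist to cover.

/* Assumed conversion: ns = (tick & mask) * mult >> shift */
#include <stdint.h>

static uint64_t hw_ticks_to_ns(uint64_t tick, uint64_t mask,
			       uint32_t mult, uint32_t shift)
{
	return ((tick & mask) * (uint64_t)mult) >> shift;
}
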
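The hunks that follow add the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl handler and advertise the supported modes through get_ts_info, which is what lets ordinary socket applications turn the new hardware timestamping on. A minimal userspace sketch of that consumer side, using only the standard net_tstamp API; the interface name and the choice of HWTSTAMP_FILTER_ALL are illustrative.

/*
 * Minimal consumer-side sketch (not part of the patch): enable hardware
 * timestamping on an interface via SIOCSHWTSTAMP, then request hardware
 * timestamps on the socket with SO_TIMESTAMPING.
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static int enable_hw_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int flags;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* configure the NIC through the driver's SIOCSHWTSTAMP handler */
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	flags = SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	/* ask the socket to deliver the raw hardware timestamps */
	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
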
@@ -1496,6 +1784,7 @@ static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ionic_lif *lif = netdev_priv(netdev); + netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue); schedule_work(&lif->tx_timeout_work); } @@ -1645,11 +1934,17 @@ static void ionic_txrx_disable(struct ionic_lif *lif) err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT)); } + if (lif->hwstamp_txq) + err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT)); + if (lif->rxqcqs) { for (i = 0; i < lif->nxqs; i++) err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); } + if (lif->hwstamp_rxq) + err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT)); + ionic_lif_quiesce(lif); } @@ -1672,6 +1967,17 @@ static void ionic_txrx_deinit(struct ionic_lif *lif) } } lif->rx_mode = 0; + + if (lif->hwstamp_txq) { + ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); + ionic_tx_flush(&lif->hwstamp_txq->cq); + ionic_tx_empty(&lif->hwstamp_txq->q); + } + + if (lif->hwstamp_rxq) { + ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); + ionic_rx_empty(&lif->hwstamp_rxq->q); + } } static void ionic_txrx_free(struct ionic_lif *lif) @@ -1693,15 +1999,30 @@ static void ionic_txrx_free(struct ionic_lif *lif) lif->rxqcqs[i] = NULL; } } + + if (lif->hwstamp_txq) { + ionic_qcq_free(lif, lif->hwstamp_txq); + devm_kfree(lif->ionic->dev, lif->hwstamp_txq); + lif->hwstamp_txq = NULL; + } + + if (lif->hwstamp_rxq) { + ionic_qcq_free(lif, lif->hwstamp_rxq); + devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); + lif->hwstamp_rxq = NULL; + } } static int ionic_txrx_alloc(struct ionic_lif *lif) { - unsigned int sg_desc_sz; - unsigned int flags; - unsigned int i; + unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; + unsigned int flags, i; int err = 0; + num_desc = lif->ntxq_descs; + desc_sz = sizeof(struct ionic_txq_desc); + comp_sz = sizeof(struct ionic_txq_comp); + if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1)) @@ -1714,10 +2035,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) flags |= IONIC_QCQ_F_INTR; for (i = 0; i < lif->nxqs; i++) { err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, - lif->ntxq_descs, - sizeof(struct ionic_txq_desc), - sizeof(struct ionic_txq_comp), - sg_desc_sz, + num_desc, desc_sz, comp_sz, sg_desc_sz, lif->kern_pid, &lif->txqcqs[i]); if (err) goto err_out; @@ -1734,16 +2052,24 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) } flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; + + num_desc = lif->nrxq_descs; + desc_sz = sizeof(struct ionic_rxq_desc); + comp_sz = sizeof(struct ionic_rxq_comp); + sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); + + if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) + comp_sz *= 2; + for (i = 0; i < lif->nxqs; i++) { err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, - lif->nrxq_descs, - sizeof(struct ionic_rxq_desc), - sizeof(struct ionic_rxq_comp), - sizeof(struct ionic_rxq_sg_desc), + num_desc, desc_sz, comp_sz, sg_desc_sz, lif->kern_pid, &lif->rxqcqs[i]); if (err) goto err_out; + lif->rxqcqs[i]->q.features = lif->rxq_features; + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, lif->rxqcqs[i]->intr.index, lif->rx_coalesce_hw); @@ -1822,8 +2148,26 @@ static int ionic_txrx_enable(struct ionic_lif *lif) } } + if (lif->hwstamp_rxq) { + ionic_rx_fill(&lif->hwstamp_rxq->q); + err = ionic_qcq_enable(lif->hwstamp_rxq); + if (err) + goto err_out_hwstamp_rx; + } + + if (lif->hwstamp_txq) { + err = 
ionic_qcq_enable(lif->hwstamp_txq); + if (err) + goto err_out_hwstamp_tx; + } + return 0; +err_out_hwstamp_tx: + if (lif->hwstamp_rxq) + derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT)); +err_out_hwstamp_rx: + i = lif->nxqs; err_out: while (i--) { derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT)); @@ -1837,6 +2181,12 @@ static int ionic_start_queues(struct ionic_lif *lif) { int err; + if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) + return -EIO; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) return 0; @@ -1855,13 +2205,17 @@ static int ionic_open(struct net_device *netdev) struct ionic_lif *lif = netdev_priv(netdev); int err; + /* If recovering from a broken state, clear the bit and we'll try again */ + if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) + netdev_info(netdev, "clearing broken state\n"); + err = ionic_txrx_alloc(lif); if (err) return err; err = ionic_txrx_init(lif); if (err) - goto err_out; + goto err_txrx_free; err = netif_set_real_num_tx_queues(netdev, lif->nxqs); if (err) @@ -1882,7 +2236,7 @@ static int ionic_open(struct net_device *netdev) err_txrx_deinit: ionic_txrx_deinit(lif); -err_out: +err_txrx_free: ionic_txrx_free(lif); return err; } @@ -1910,6 +2264,20 @@ static int ionic_stop(struct net_device *netdev) return 0; } +static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return ionic_lif_hwstamp_set(lif, ifr); + case SIOCGHWTSTAMP: + return ionic_lif_hwstamp_get(lif, ifr); + default: + return -EOPNOTSUPP; + } +} + static int ionic_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivf) { @@ -2158,6 +2526,7 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) static const struct net_device_ops ionic_netdev_ops = { .ndo_open = ionic_open, .ndo_stop = ionic_stop, + .ndo_do_ioctl = ionic_do_ioctl, .ndo_start_xmit = ionic_start_xmit, .ndo_get_stats64 = ionic_get_stats64, .ndo_set_rx_mode = ionic_ndo_set_rx_mode, @@ -2181,7 +2550,9 @@ static const struct net_device_ops ionic_netdev_ops = { static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) { /* only swapping the queues, not the napi, flags, or other stuff */ + swap(a->q.features, b->q.features); swap(a->q.num_descs, b->q.num_descs); + swap(a->q.desc_size, b->q.desc_size); swap(a->q.base, b->q.base); swap(a->q.base_pa, b->q.base_pa); swap(a->q.info, b->q.info); @@ -2189,6 +2560,7 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) swap(a->q_base_pa, b->q_base_pa); swap(a->q_size, b->q_size); + swap(a->q.sg_desc_size, b->q.sg_desc_size); swap(a->q.sg_base, b->q.sg_base); swap(a->q.sg_base_pa, b->q.sg_base_pa); swap(a->sg_base, b->sg_base); @@ -2196,23 +2568,26 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) swap(a->sg_size, b->sg_size); swap(a->cq.num_descs, b->cq.num_descs); + swap(a->cq.desc_size, b->cq.desc_size); swap(a->cq.base, b->cq.base); swap(a->cq.base_pa, b->cq.base_pa); swap(a->cq.info, b->cq.info); swap(a->cq_base, b->cq_base); swap(a->cq_base_pa, b->cq_base_pa); swap(a->cq_size, b->cq_size); + + ionic_debugfs_del_qcq(a); + ionic_debugfs_add_qcq(a->q.lif, a); } int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_queue_params *qparam) { + unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; struct ionic_qcq **tx_qcqs = NULL; struct ionic_qcq **rx_qcqs = 
NULL; - unsigned int sg_desc_sz; - unsigned int flags; + unsigned int flags, i; int err = -ENOMEM; - unsigned int i; /* allocate temporary qcq arrays to hold new queue structs */ if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { @@ -2221,7 +2596,9 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, if (!tx_qcqs) goto err_out; } - if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) { + if (qparam->nxqs != lif->nxqs || + qparam->nrxq_descs != lif->nrxq_descs || + qparam->rxq_features != lif->rxq_features) { rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); if (!rx_qcqs) @@ -2231,21 +2608,22 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, /* allocate new desc_info and rings, but leave the interrupt setup * until later so as to not mess with the still-running queues */ - if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && - lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == - sizeof(struct ionic_txq_sg_desc_v1)) - sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); - else - sg_desc_sz = sizeof(struct ionic_txq_sg_desc); - if (tx_qcqs) { + num_desc = qparam->ntxq_descs; + desc_sz = sizeof(struct ionic_txq_desc); + comp_sz = sizeof(struct ionic_txq_comp); + + if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && + lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == + sizeof(struct ionic_txq_sg_desc_v1)) + sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); + else + sg_desc_sz = sizeof(struct ionic_txq_sg_desc); + for (i = 0; i < qparam->nxqs; i++) { flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, - qparam->ntxq_descs, - sizeof(struct ionic_txq_desc), - sizeof(struct ionic_txq_comp), - sg_desc_sz, + num_desc, desc_sz, comp_sz, sg_desc_sz, lif->kern_pid, &tx_qcqs[i]); if (err) goto err_out; @@ -2253,16 +2631,23 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, } if (rx_qcqs) { + num_desc = qparam->nrxq_descs; + desc_sz = sizeof(struct ionic_rxq_desc); + comp_sz = sizeof(struct ionic_rxq_comp); + sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); + + if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) + comp_sz *= 2; + for (i = 0; i < qparam->nxqs; i++) { flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, - qparam->nrxq_descs, - sizeof(struct ionic_rxq_desc), - sizeof(struct ionic_rxq_comp), - sizeof(struct ionic_rxq_sg_desc), + num_desc, desc_sz, comp_sz, sg_desc_sz, lif->kern_pid, &rx_qcqs[i]); if (err) goto err_out; + + rx_qcqs[i]->q.features = qparam->rxq_features; } } @@ -2349,9 +2734,10 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, } swap(lif->nxqs, qparam->nxqs); + swap(lif->rxq_features, qparam->rxq_features); err_out_reinit_unlock: - /* re-init the queues, but don't loose an error code */ + /* re-init the queues, but don't lose an error code */ if (err) ionic_start_queues_reconfig(lif); else @@ -2450,7 +2836,6 @@ int ionic_lif_alloc(struct ionic *ionic) lif->index = 0; lif->ntxq_descs = IONIC_DEF_TXRX_DESC; lif->nrxq_descs = IONIC_DEF_TXRX_DESC; - lif->tx_budget = IONIC_TX_BUDGET_DEFAULT; /* Convert the default coalesce value to actual hw resolution */ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; @@ -2501,6 +2886,8 @@ int ionic_lif_alloc(struct ionic *ionic) } netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); + ionic_lif_alloc_phc(lif); + return 0; err_out_free_qcqs: @@ -2601,10 +2988,13 @@ static void ionic_lif_handle_fw_up(struct ionic_lif 
*lif) } clear_bit(IONIC_LIF_F_FW_RESET, lif->state); - ionic_link_status_check_request(lif, true); + ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); dev_info(ionic->dev, "FW Up: LIFs restarted\n"); + /* restore the hardware timestamping queues */ + ionic_lif_hwstamp_replay(lif); + return; err_txrx_free: @@ -2621,6 +3011,8 @@ void ionic_lif_free(struct ionic_lif *lif) { struct device *dev = lif->ionic->dev; + ionic_lif_free_phc(lif); + /* free rss indirection table */ dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, lif->rss_ind_tbl_pa); @@ -2957,6 +3349,8 @@ int ionic_lif_register(struct ionic_lif *lif) { int err; + ionic_lif_register_phc(lif); + INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); lif->ionic->nb.notifier_call = ionic_lif_notify; @@ -2969,10 +3363,11 @@ int ionic_lif_register(struct ionic_lif *lif) err = register_netdev(lif->netdev); if (err) { dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); + ionic_lif_unregister_phc(lif); return err; } - ionic_link_status_check_request(lif, true); + ionic_link_status_check_request(lif, CAN_SLEEP); lif->registered = true; ionic_lif_set_netdev_info(lif); @@ -2989,6 +3384,9 @@ void ionic_lif_unregister(struct ionic_lif *lif) if (lif->netdev->reg_state == NETREG_REGISTERED) unregister_netdev(lif->netdev); + + ionic_lif_unregister_phc(lif); + lif->registered = false; } @@ -3128,6 +3526,16 @@ int ionic_lif_size(struct ionic *ionic) ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); + /* reserve last queue id for hardware timestamping */ + if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { + if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { + lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); + } else { + ntxqs_per_lif -= 1; + nrxqs_per_lif -= 1; + } + } + nxqs = min(ntxqs_per_lif, nrxqs_per_lif); nxqs = min(nxqs, num_online_cpus()); neqs = min(neqs_per_lif, num_online_cpus()); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 563dba384a53..346506f01715 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -4,6 +4,9 @@ #ifndef _IONIC_LIF_H_ #define _IONIC_LIF_H_ +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> +#include <uapi/linux/net_tstamp.h> #include <linux/dim.h> #include <linux/pci.h> #include "ionic_rx_filter.h" @@ -36,6 +39,8 @@ struct ionic_tx_stats { u64 crc32_csum; u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR]; u64 dma_map_err; + u64 hwstamp_valid; + u64 hwstamp_invalid; }; struct ionic_rx_stats { @@ -49,6 +54,8 @@ struct ionic_rx_stats { u64 csum_error; u64 dma_map_err; u64 alloc_err; + u64 hwstamp_valid; + u64 hwstamp_invalid; }; #define IONIC_QCQ_F_INITED BIT(0) @@ -125,6 +132,10 @@ struct ionic_lif_sw_stats { u64 rx_csum_none; u64 rx_csum_complete; u64 rx_csum_error; + u64 tx_hwstamp_valid; + u64 tx_hwstamp_invalid; + u64 rx_hwstamp_valid; + u64 rx_hwstamp_invalid; u64 hw_tx_dropped; u64 hw_rx_dropped; u64 hw_rx_over_errors; @@ -139,6 +150,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_LINK_CHECK_REQUESTED, IONIC_LIF_F_FW_RESET, IONIC_LIF_F_SPLIT_INTR, + IONIC_LIF_F_BROKEN, IONIC_LIF_F_TX_DIM_INTR, IONIC_LIF_F_RX_DIM_INTR, @@ -157,40 +169,45 @@ struct ionic_qtype_info { u16 sg_desc_stride; }; +struct ionic_phc; + #define IONIC_LIF_NAME_MAX_SZ 32 struct ionic_lif { - char name[IONIC_LIF_NAME_MAX_SZ]; - struct list_head list; struct net_device 
*netdev; DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE); struct ionic *ionic; - bool registered; unsigned int index; unsigned int hw_index; - unsigned int kern_pid; - u64 __iomem *kern_dbpage; struct mutex queue_lock; /* lock for queue structures */ spinlock_t adminq_lock; /* lock for AdminQ operations */ struct ionic_qcq *adminqcq; struct ionic_qcq *notifyqcq; struct ionic_qcq **txqcqs; + struct ionic_qcq *hwstamp_txq; struct ionic_tx_stats *txqstats; struct ionic_qcq **rxqcqs; + struct ionic_qcq *hwstamp_rxq; struct ionic_rx_stats *rxqstats; + struct ionic_deferred deferred; + struct work_struct tx_timeout_work; u64 last_eid; + unsigned int kern_pid; + u64 __iomem *kern_dbpage; unsigned int neqs; unsigned int nxqs; unsigned int ntxq_descs; unsigned int nrxq_descs; u32 rx_copybreak; - u32 tx_budget; + u64 rxq_features; unsigned int rx_mode; u64 hw_features; + bool registered; bool mc_overflow; - unsigned int nmcast; bool uc_overflow; u16 lif_type; + unsigned int nmcast; unsigned int nucast; + char name[IONIC_LIF_NAME_MAX_SZ]; union ionic_lif_identity *identity; struct ionic_lif_info *info; @@ -205,16 +222,34 @@ struct ionic_lif { u32 rss_ind_tbl_sz; struct ionic_rx_filters rx_filters; - struct ionic_deferred deferred; - unsigned long *dbid_inuse; - unsigned int dbid_count; - struct dentry *dentry; u32 rx_coalesce_usecs; /* what the user asked for */ u32 rx_coalesce_hw; /* what the hw is using */ u32 tx_coalesce_usecs; /* what the user asked for */ u32 tx_coalesce_hw; /* what the hw is using */ + unsigned long *dbid_inuse; + unsigned int dbid_count; - struct work_struct tx_timeout_work; + struct ionic_phc *phc; + + struct dentry *dentry; +}; + +struct ionic_phc { + spinlock_t lock; /* lock for cc and tc */ + struct cyclecounter cc; + struct timecounter tc; + + struct mutex config_lock; /* lock for ts_config */ + struct hwtstamp_config ts_config; + u64 ts_config_rx_filt; + u32 ts_config_tx_mode; + + u32 init_cc_mult; + long aux_work_delay; + + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp; + struct ionic_lif *lif; }; struct ionic_queue_params { @@ -222,6 +257,7 @@ struct ionic_queue_params { unsigned int ntxq_descs; unsigned int nrxq_descs; unsigned int intr_split; + u64 rxq_features; }; static inline void ionic_init_queue_params(struct ionic_lif *lif, @@ -231,6 +267,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif, qparam->ntxq_descs = lif->ntxq_descs; qparam->nrxq_descs = lif->nrxq_descs; qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + qparam->rxq_features = lif->rxq_features; } static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) @@ -263,6 +300,49 @@ void ionic_lif_unregister(struct ionic_lif *lif); int ionic_lif_identify(struct ionic *ionic, u8 lif_type, union ionic_lif_identity *lif_ident); int ionic_lif_size(struct ionic *ionic); + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int ionic_lif_hwstamp_replay(struct ionic_lif *lif); +int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); +int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); +ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter); +void ionic_lif_register_phc(struct ionic_lif *lif); +void ionic_lif_unregister_phc(struct ionic_lif *lif); +void ionic_lif_alloc_phc(struct ionic_lif *lif); +void ionic_lif_free_phc(struct ionic_lif *lif); +#else +static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif) +{ + return -EOPNOTSUPP; +} + +static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +{ + 
return -EOPNOTSUPP; +} + +static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static inline ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter) +{ + return ns_to_ktime(0); +} + +static inline void ionic_lif_register_phc(struct ionic_lif *lif) {} +static inline void ionic_lif_unregister_phc(struct ionic_lif *lif) {} +static inline void ionic_lif_alloc_phc(struct ionic_lif *lif) {} +static inline void ionic_lif_free_phc(struct ionic_lif *lif) {} +#endif + +int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif); +int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif); +int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all); +int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode); +int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class); + int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, const u8 *key, const u32 *indir); int ionic_reconfigure_queues(struct ionic_lif *lif, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index fbc57de6683e..61cfe2120817 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -148,6 +148,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) return "IONIC_CMD_LIF_SETATTR"; case IONIC_CMD_LIF_GETATTR: return "IONIC_CMD_LIF_GETATTR"; + case IONIC_CMD_LIF_SETPHC: + return "IONIC_CMD_LIF_SETPHC"; case IONIC_CMD_RX_MODE_SET: return "IONIC_CMD_RX_MODE_SET"; case IONIC_CMD_RX_FILTER_ADD: @@ -187,10 +189,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) static void ionic_adminq_flush(struct ionic_lif *lif) { - struct ionic_queue *q = &lif->adminqcq->q; struct ionic_desc_info *desc_info; + unsigned long irqflags; + struct ionic_queue *q; - spin_lock(&lif->adminq_lock); + spin_lock_irqsave(&lif->adminq_lock, irqflags); + if (!lif->adminqcq) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + return; + } + + q = &lif->adminqcq->q; while (q->tail_idx != q->head_idx) { desc_info = &q->info[q->tail_idx]; @@ -199,7 +208,7 @@ static void ionic_adminq_flush(struct ionic_lif *lif) desc_info->cb_arg = NULL; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); } - spin_unlock(&lif->adminq_lock); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); } static int ionic_adminq_check_err(struct ionic_lif *lif, @@ -234,35 +243,36 @@ static void ionic_adminq_cb(struct ionic_queue *q, { struct ionic_admin_ctx *ctx = cb_arg; struct ionic_admin_comp *comp; - struct device *dev; if (!ctx) return; comp = cq_info->cq_desc; - dev = &q->lif->netdev->dev; memcpy(&ctx->comp, comp, sizeof(*comp)); - dev_dbg(dev, "comp admin queue command:\n"); + dev_dbg(q->dev, "comp admin queue command:\n"); dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, &ctx->comp, sizeof(ctx->comp), true); complete_all(&ctx->work); } -static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { struct ionic_desc_info *desc_info; + unsigned long irqflags; struct ionic_queue *q; int err = 0; - if (!lif->adminqcq) + spin_lock_irqsave(&lif->adminq_lock, irqflags); + if (!lif->adminqcq) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); return -EIO; + } q = &lif->adminqcq->q; - spin_lock(&lif->adminq_lock); if (!ionic_q_has_space(q, 1)) { err = -ENOSPC; goto err_out; @@ -282,19 +292,17 @@ static int 
ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) ionic_q_post(q, true, ionic_adminq_cb, ctx); err_out: - spin_unlock(&lif->adminq_lock); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); return err; } -int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err) { struct net_device *netdev = lif->netdev; unsigned long remaining; const char *name; - int err; - err = ionic_adminq_post(lif, ctx); if (err) { if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); @@ -309,6 +317,15 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) return ionic_adminq_check_err(lif, ctx, (remaining == 0)); } +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + int err; + + err = ionic_adminq_post(lif, ctx); + + return ionic_adminq_wait(lif, ctx, err); +} + static void ionic_dev_cmd_clean(struct ionic *ionic) { union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c new file mode 100644 index 000000000000..a87c87e86aef --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2021 Pensando Systems, Inc */ + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_ethtool.h" + +static int ionic_hwstamp_tx_mode(int config_tx_type) +{ + switch (config_tx_type) { + case HWTSTAMP_TX_OFF: + return IONIC_TXSTAMP_OFF; + case HWTSTAMP_TX_ON: + return IONIC_TXSTAMP_ON; + case HWTSTAMP_TX_ONESTEP_SYNC: + return IONIC_TXSTAMP_ONESTEP_SYNC; + case HWTSTAMP_TX_ONESTEP_P2P: + return IONIC_TXSTAMP_ONESTEP_P2P; + default: + return -ERANGE; + } +} + +static u64 ionic_hwstamp_rx_filt(int config_rx_filter) +{ + switch (config_rx_filter) { + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + return IONIC_PKT_CLS_PTP1_ALL; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + return IONIC_PKT_CLS_PTP1_SYNC; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + return IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + return IONIC_PKT_CLS_PTP2_L4_ALL; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + return IONIC_PKT_CLS_PTP2_L4_SYNC; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + return IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ; + + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + return IONIC_PKT_CLS_PTP2_L2_ALL; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + return IONIC_PKT_CLS_PTP2_L2_SYNC; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + return IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + return IONIC_PKT_CLS_PTP2_ALL; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + return IONIC_PKT_CLS_PTP2_SYNC; + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + return IONIC_PKT_CLS_PTP2_SYNC | IONIC_PKT_CLS_PTP2_DREQ; + + case HWTSTAMP_FILTER_NTP_ALL: + return IONIC_PKT_CLS_NTP_ALL; + + default: + return 0; + } +} + +static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, + struct hwtstamp_config *new_ts) +{ + struct ionic *ionic = lif->ionic; + struct hwtstamp_config *config; + struct hwtstamp_config ts; + int tx_mode = 0; + u64 rx_filt = 0; + int err, err2; + bool rx_all; + __le64 mask; + + if (!lif->phc || !lif->phc->ptp) + return -EOPNOTSUPP; + + 
mutex_lock(&lif->phc->config_lock); + + if (new_ts) { + config = new_ts; + } else { + /* If called with new_ts == NULL, replay the previous request + * primarily for recovery after a FW_RESET. + * We saved the previous configuration request info, so copy + * the previous request for reference, clear the current state + * to match the device's reset state, and run with it. + */ + config = &ts; + memcpy(config, &lif->phc->ts_config, sizeof(*config)); + memset(&lif->phc->ts_config, 0, sizeof(lif->phc->ts_config)); + lif->phc->ts_config_tx_mode = 0; + lif->phc->ts_config_rx_filt = 0; + } + + tx_mode = ionic_hwstamp_tx_mode(config->tx_type); + if (tx_mode < 0) { + err = tx_mode; + goto err_queues; + } + + mask = cpu_to_le64(BIT_ULL(tx_mode)); + if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) { + err = -ERANGE; + goto err_queues; + } + + rx_filt = ionic_hwstamp_rx_filt(config->rx_filter); + rx_all = config->rx_filter != HWTSTAMP_FILTER_NONE && !rx_filt; + + mask = cpu_to_le64(rx_filt); + if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) != mask) { + rx_filt = 0; + rx_all = true; + config->rx_filter = HWTSTAMP_FILTER_ALL; + } + + dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n", + config->rx_filter, rx_filt, rx_all); + + if (tx_mode) { + err = ionic_lif_create_hwstamp_txq(lif); + if (err) + goto err_queues; + } + + if (rx_filt) { + err = ionic_lif_create_hwstamp_rxq(lif); + if (err) + goto err_queues; + } + + if (tx_mode != lif->phc->ts_config_tx_mode) { + err = ionic_lif_set_hwstamp_txmode(lif, tx_mode); + if (err) + goto err_txmode; + } + + if (rx_filt != lif->phc->ts_config_rx_filt) { + err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt); + if (err) + goto err_rxfilt; + } + + if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) { + err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all); + if (err) + goto err_rxall; + } + + memcpy(&lif->phc->ts_config, config, sizeof(*config)); + lif->phc->ts_config_rx_filt = rx_filt; + lif->phc->ts_config_tx_mode = tx_mode; + + mutex_unlock(&lif->phc->config_lock); + + return 0; + +err_rxall: + if (rx_filt != lif->phc->ts_config_rx_filt) { + rx_filt = lif->phc->ts_config_rx_filt; + err2 = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt); + if (err2) + dev_err(ionic->dev, + "Failed to revert rx timestamp filter: %d\n", err2); + } +err_rxfilt: + if (tx_mode != lif->phc->ts_config_tx_mode) { + tx_mode = lif->phc->ts_config_tx_mode; + err2 = ionic_lif_set_hwstamp_txmode(lif, tx_mode); + if (err2) + dev_err(ionic->dev, + "Failed to revert tx timestamp mode: %d\n", err2); + } +err_txmode: + /* special queues remain allocated, just unused */ +err_queues: + mutex_unlock(&lif->phc->config_lock); + return err; +} + +int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = ionic_lif_hwstamp_set_ts_config(lif, &config); + if (err) { + netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); + return err; + } + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +int ionic_lif_hwstamp_replay(struct ionic_lif *lif) +{ + int err; + + err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + if (err) + netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); + + return err; +} + +int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +{ + struct hwtstamp_config config; + + if (!lif->phc || !lif->phc->ptp) + return -EOPNOTSUPP; + + 
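The hwstamp set/get handlers above service the standard SIOCSHWTSTAMP ioctl path; below is a minimal userspace sketch (not part of the patch) of the request they handle, with "eth0" used only as a placeholder interface name.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int request_hwstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* the driver may widen rx_filter (e.g. to HWTSTAMP_FILTER_ALL) and
	 * writes the effective config back into cfg on success
	 */
	err = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return err;
}

int main(void)
{
	return request_hwstamp("eth0") ? 1 : 0;
}
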
mutex_lock(&lif->phc->config_lock); + memcpy(&config, &lif->phc->ts_config, sizeof(config)); + mutex_unlock(&lif->phc->config_lock); + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + return 0; +} + +static u64 ionic_hwstamp_read(struct ionic *ionic, + struct ptp_system_timestamp *sts) +{ + u32 tick_high_before, tick_high, tick_low; + + /* read and discard low part to defeat hw staging of high part */ + (void)ioread32(&ionic->idev.hwstamp_regs->tick_low); + + tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high); + + ptp_read_system_prets(sts); + tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low); + ptp_read_system_postts(sts); + + tick_high = ioread32(&ionic->idev.hwstamp_regs->tick_high); + + /* If tick_high changed, re-read tick_low once more. Assume tick_high + * cannot change again so soon as in the span of re-reading tick_low. + */ + if (tick_high != tick_high_before) { + ptp_read_system_prets(sts); + tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low); + ptp_read_system_postts(sts); + } + + return (u64)tick_low | ((u64)tick_high << 32); +} + +static u64 ionic_cc_read(const struct cyclecounter *cc) +{ + struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc); + struct ionic *ionic = phc->lif->ionic; + + return ionic_hwstamp_read(ionic, NULL); +} + +static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx) +{ + ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work); + + ctx->cmd.lif_setphc.opcode = IONIC_CMD_LIF_SETPHC; + ctx->cmd.lif_setphc.lif_index = cpu_to_le16(phc->lif->index); + + ctx->cmd.lif_setphc.tick = cpu_to_le64(phc->tc.cycle_last); + ctx->cmd.lif_setphc.nsec = cpu_to_le64(phc->tc.nsec); + ctx->cmd.lif_setphc.frac = cpu_to_le64(phc->tc.frac); + ctx->cmd.lif_setphc.mult = cpu_to_le32(phc->cc.mult); + ctx->cmd.lif_setphc.shift = cpu_to_le32(phc->cc.shift); + + return ionic_adminq_post(phc->lif, ctx); +} + +static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + s64 adj; + int err; + + /* Reject phc adjustments during device upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + /* Adjustment value scaled by 2^16 million */ + adj = (s64)scaled_ppm * phc->init_cc_mult; + + /* Adjustment value to scale */ + adj /= (s64)SCALED_PPM; + + /* Final adjusted multiplier */ + adj += phc->init_cc_mult; + + spin_lock_irqsave(&phc->lock, irqflags); + + /* update the point-in-time basis to now, before adjusting the rate */ + timecounter_read(&phc->tc); + phc->cc.mult = adj; + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. + */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + return ionic_adminq_wait(phc->lif, &ctx, err); +} + +static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + int err; + + /* Reject phc adjustments during device upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + spin_lock_irqsave(&phc->lock, irqflags); + + timecounter_adjtime(&phc->tc, delta); + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. 
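ionic_phc_adjfine() above applies the usual scaled-ppm frequency adjustment to the base multiplier. A small arithmetic sketch follows, assuming SCALED_PPM is (1000000 << 16), i.e. parts per million with a 16-bit binary fraction; the constant name and example multiplier are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* assumed definition: ppm with a 16-bit binary fraction */
#define EX_SCALED_PPM (1000000LL << 16)

static uint32_t ex_adjusted_mult(uint32_t base_mult, long scaled_ppm)
{
	int64_t adj;

	adj = (int64_t)scaled_ppm * base_mult;	/* scale by the ratio */
	adj /= EX_SCALED_PPM;			/* back to mult units */
	return base_mult + (int32_t)adj;	/* adjusted multiplier */
}

int main(void)
{
	/* scaled_ppm == 65536 requests +1 ppm: the multiplier grows by ~2147 */
	printf("%u -> %u\n", 0x80000000u,
	       ex_adjusted_mult(0x80000000u, 65536));
	return 0;
}
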
+ */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + return ionic_adminq_wait(phc->lif, &ctx, err); +} + +static int ionic_phc_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + int err; + u64 ns; + + /* Reject phc adjustments during device upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&phc->lock, irqflags); + + timecounter_init(&phc->tc, &phc->cc, ns); + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. + */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + return ionic_adminq_wait(phc->lif, &ctx, err); +} + +static int ionic_phc_gettimex64(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic *ionic = phc->lif->ionic; + unsigned long irqflags; + u64 tick, ns; + + /* Do not attempt to read device time during upgrade */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return -EBUSY; + + spin_lock_irqsave(&phc->lock, irqflags); + + tick = ionic_hwstamp_read(ionic, sts); + + ns = timecounter_cyc2time(&phc->tc, tick); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static long ionic_phc_aux_work(struct ptp_clock_info *info) +{ + struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info); + struct ionic_admin_ctx ctx = {}; + unsigned long irqflags; + int err; + + /* Do not update phc during device upgrade, but keep polling to resume + * after upgrade. Since we don't update the point in time basis, there + * is no expectation that we are maintaining the phc time during the + * upgrade. After upgrade, it will need to be readjusted back to the + * correct time by the ptp daemon. + */ + if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state)) + return phc->aux_work_delay; + + spin_lock_irqsave(&phc->lock, irqflags); + + /* update point-in-time basis to now */ + timecounter_read(&phc->tc); + + /* Setphc commands are posted in-order, sequenced by phc->lock. We + * need to drop the lock before waiting for the command to complete. 
+ */ + err = ionic_setphc_cmd(phc, &ctx); + + spin_unlock_irqrestore(&phc->lock, irqflags); + + ionic_adminq_wait(phc->lif, &ctx, err); + + return phc->aux_work_delay; +} + +ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 tick) +{ + unsigned long irqflags; + u64 ns; + + if (!lif->phc) + return 0; + + spin_lock_irqsave(&lif->phc->lock, irqflags); + ns = timecounter_cyc2time(&lif->phc->tc, tick); + spin_unlock_irqrestore(&lif->phc->lock, irqflags); + + return ns_to_ktime(ns); +} + +static const struct ptp_clock_info ionic_ptp_info = { + .owner = THIS_MODULE, + .name = "ionic_ptp", + .adjfine = ionic_phc_adjfine, + .adjtime = ionic_phc_adjtime, + .gettimex64 = ionic_phc_gettimex64, + .settime64 = ionic_phc_settime64, + .do_aux_work = ionic_phc_aux_work, +}; + +void ionic_lif_register_phc(struct ionic_lif *lif) +{ + if (!lif->phc || !(lif->hw_features & IONIC_ETH_HW_TIMESTAMP)) + return; + + lif->phc->ptp = ptp_clock_register(&lif->phc->ptp_info, lif->ionic->dev); + + if (IS_ERR(lif->phc->ptp)) { + dev_warn(lif->ionic->dev, "Cannot register phc device: %ld\n", + PTR_ERR(lif->phc->ptp)); + + lif->phc->ptp = NULL; + } + + if (lif->phc->ptp) + ptp_schedule_worker(lif->phc->ptp, lif->phc->aux_work_delay); +} + +void ionic_lif_unregister_phc(struct ionic_lif *lif) +{ + if (!lif->phc || !lif->phc->ptp) + return; + + ptp_clock_unregister(lif->phc->ptp); + + lif->phc->ptp = NULL; +} + +void ionic_lif_alloc_phc(struct ionic_lif *lif) +{ + struct ionic *ionic = lif->ionic; + struct ionic_phc *phc; + u64 delay, diff, mult; + u64 frac = 0; + u64 features; + u32 shift; + + if (!ionic->idev.hwstamp_regs) + return; + + features = le64_to_cpu(ionic->ident.lif.eth.config.features); + if (!(features & IONIC_ETH_HW_TIMESTAMP)) + return; + + phc = devm_kzalloc(ionic->dev, sizeof(*phc), GFP_KERNEL); + if (!phc) + return; + + phc->lif = lif; + + phc->cc.read = ionic_cc_read; + phc->cc.mask = le64_to_cpu(ionic->ident.dev.hwstamp_mask); + phc->cc.mult = le32_to_cpu(ionic->ident.dev.hwstamp_mult); + phc->cc.shift = le32_to_cpu(ionic->ident.dev.hwstamp_shift); + + if (!phc->cc.mult) { + dev_err(lif->ionic->dev, + "Invalid device PHC mask multiplier %u, disabling HW timestamp support\n", + phc->cc.mult); + devm_kfree(lif->ionic->dev, phc); + lif->phc = NULL; + return; + } + + dev_dbg(lif->ionic->dev, "Device PHC mask %#llx mult %u shift %u\n", + phc->cc.mask, phc->cc.mult, phc->cc.shift); + + spin_lock_init(&phc->lock); + mutex_init(&phc->config_lock); + + /* max ticks is limited by the multiplier, or by the update period. */ + if (phc->cc.shift + 2 + ilog2(IONIC_PHC_UPDATE_NS) >= 64) { + /* max ticks that do not overflow when multiplied by max + * adjusted multiplier (twice the initial multiplier) + */ + diff = U64_MAX / phc->cc.mult / 2; + } else { + /* approx ticks at four times the update period */ + diff = (u64)IONIC_PHC_UPDATE_NS << (phc->cc.shift + 2); + diff = DIV_ROUND_UP(diff, phc->cc.mult); + } + + /* transform to bitmask */ + diff |= diff >> 1; + diff |= diff >> 2; + diff |= diff >> 4; + diff |= diff >> 8; + diff |= diff >> 16; + diff |= diff >> 32; + + /* constrain to the hardware bitmask, and use this as the bitmask */ + diff &= phc->cc.mask; + phc->cc.mask = diff; + + /* the wrap period is now defined by diff (or phc->cc.mask) + * + * we will update the time basis at about 1/4 the wrap period, so + * should not see a difference of more than +/- diff/4. + * + * this is sufficient not see a difference of more than +/- diff/2, as + * required by timecounter_cyc2time, to detect an old time stamp. 
+ * + * adjust the initial multiplier, being careful to avoid overflow: + * - do not overflow 63 bits: init_cc_mult * SCALED_PPM + * - do not overflow 64 bits: max_mult * (diff / 2) + * + * we want to increase the initial multiplier as much as possible, to + * allow for more precise adjustment in ionic_phc_adjfine. + * + * only adjust the multiplier if we can double it or more. + */ + mult = U64_MAX / 2 / max(diff / 2, SCALED_PPM); + shift = mult / phc->cc.mult; + if (shift >= 2) { + /* initial multiplier will be 2^n of hardware cc.mult */ + shift = fls(shift); + /* increase cc.mult and cc.shift by the same 2^n and n. */ + phc->cc.mult <<= shift; + phc->cc.shift += shift; + } + + dev_dbg(lif->ionic->dev, "Initial PHC mask %#llx mult %u shift %u\n", + phc->cc.mask, phc->cc.mult, phc->cc.shift); + + /* frequency adjustments are relative to the initial multiplier */ + phc->init_cc_mult = phc->cc.mult; + + timecounter_init(&phc->tc, &phc->cc, ktime_get_real_ns()); + + /* Update cycle_last at 1/4 the wrap period, or IONIC_PHC_UPDATE_NS */ + delay = min_t(u64, IONIC_PHC_UPDATE_NS, + cyclecounter_cyc2ns(&phc->cc, diff / 4, 0, &frac)); + dev_dbg(lif->ionic->dev, "Work delay %llu ms\n", delay / NSEC_PER_MSEC); + + phc->aux_work_delay = nsecs_to_jiffies(delay); + + phc->ptp_info = ionic_ptp_info; + + /* We have allowed to adjust the multiplier up to +/- 1 part per 1. + * Here expressed as NORMAL_PPB (1 billion parts per billion). + */ + phc->ptp_info.max_adj = NORMAL_PPB; + + lif->phc = phc; +} + +void ionic_lif_free_phc(struct ionic_lif *lif) +{ + if (!lif->phc) + return; + + mutex_destroy(&lif->phc->config_lock); + + devm_kfree(lif->ionic->dev, lif->phc); + lif->phc = NULL; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index cd0076fc3044..d71316d9ded2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -140,6 +140,9 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, case IONIC_RX_FILTER_MATCH_MAC_VLAN: key = le16_to_cpu(ac->mac_vlan.vlan); break; + case IONIC_RX_FILTER_STEER_PKTCLASS: + key = 0; + break; default: return -EINVAL; } @@ -210,3 +213,21 @@ struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, return NULL; } + +struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(0, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS) + continue; + return f; + } + + return NULL; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index cf8f4c0a961c..1ead48be3c83 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -31,5 +31,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, u32 hash, struct ionic_admin_ctx *ctx); struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); +struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); #endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c 
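The mask/mult/shift values configured in ionic_lif_alloc_phc() above feed the generic cyclecounter/timecounter conversion, where a tick delta becomes nanoseconds as (delta * mult) >> shift and the mask bounds how large a delta may grow before it is ambiguous. A sketch with made-up example values (a real timecounter also carries a fractional remainder and guards against multiplication overflow):

#include <stdint.h>
#include <stdio.h>

struct ex_cc {
	uint64_t mask;	/* counter wraps within this bitmask */
	uint32_t mult;	/* ns per tick as a fixed-point value ... */
	uint32_t shift;	/* ... with 'shift' fractional bits */
};

static uint64_t ex_cyc2ns(const struct ex_cc *cc, uint64_t ticks)
{
	return (ticks * cc->mult) >> cc->shift;
}

static uint64_t ex_delta(const struct ex_cc *cc, uint64_t last, uint64_t now)
{
	/* masked subtraction tolerates a counter that wrapped once */
	return (now - last) & cc->mask;
}

int main(void)
{
	struct ex_cc cc = {
		.mask = (1ULL << 48) - 1,
		.mult = 524288,		/* 1 ns per tick with shift 19 */
		.shift = 19,
	};

	printf("%llu ns\n", (unsigned long long)
	       ex_cyc2ns(&cc, ex_delta(&cc, 100, 1100)));
	return 0;
}
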
b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index 6ae75b771a15..58a854666c62 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -130,6 +130,8 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = { IONIC_TX_STAT_DESC(frags), IONIC_TX_STAT_DESC(tso), IONIC_TX_STAT_DESC(tso_bytes), + IONIC_TX_STAT_DESC(hwstamp_valid), + IONIC_TX_STAT_DESC(hwstamp_invalid), IONIC_TX_STAT_DESC(csum_none), IONIC_TX_STAT_DESC(csum), IONIC_TX_STAT_DESC(vlan_inserted), @@ -143,6 +145,8 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = { IONIC_RX_STAT_DESC(csum_none), IONIC_RX_STAT_DESC(csum_complete), IONIC_RX_STAT_DESC(csum_error), + IONIC_RX_STAT_DESC(hwstamp_valid), + IONIC_RX_STAT_DESC(hwstamp_invalid), IONIC_RX_STAT_DESC(dropped), IONIC_RX_STAT_DESC(vlan_stripped), }; @@ -177,33 +181,54 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = { #define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues) +static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num, + struct ionic_lif_sw_stats *stats) +{ + struct ionic_tx_stats *txstats = &lif->txqstats[q_num]; + + stats->tx_packets += txstats->pkts; + stats->tx_bytes += txstats->bytes; + stats->tx_tso += txstats->tso; + stats->tx_tso_bytes += txstats->tso_bytes; + stats->tx_csum_none += txstats->csum_none; + stats->tx_csum += txstats->csum; + stats->tx_hwstamp_valid += txstats->hwstamp_valid; + stats->tx_hwstamp_invalid += txstats->hwstamp_invalid; +} + +static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num, + struct ionic_lif_sw_stats *stats) +{ + struct ionic_rx_stats *rxstats = &lif->rxqstats[q_num]; + + stats->rx_packets += rxstats->pkts; + stats->rx_bytes += rxstats->bytes; + stats->rx_csum_none += rxstats->csum_none; + stats->rx_csum_complete += rxstats->csum_complete; + stats->rx_csum_error += rxstats->csum_error; + stats->rx_hwstamp_valid += rxstats->hwstamp_valid; + stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid; +} + static void ionic_get_lif_stats(struct ionic_lif *lif, struct ionic_lif_sw_stats *stats) { - struct ionic_tx_stats *txstats; - struct ionic_rx_stats *rxstats; struct rtnl_link_stats64 ns; int q_num; memset(stats, 0, sizeof(*stats)); for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - txstats = &lif->txqstats[q_num]; - stats->tx_packets += txstats->pkts; - stats->tx_bytes += txstats->bytes; - stats->tx_tso += txstats->tso; - stats->tx_tso_bytes += txstats->tso_bytes; - stats->tx_csum_none += txstats->csum_none; - stats->tx_csum += txstats->csum; - - rxstats = &lif->rxqstats[q_num]; - stats->rx_packets += rxstats->pkts; - stats->rx_bytes += rxstats->bytes; - stats->rx_csum_none += rxstats->csum_none; - stats->rx_csum_complete += rxstats->csum_complete; - stats->rx_csum_error += rxstats->csum_error; + ionic_add_lif_txq_stats(lif, q_num, stats); + ionic_add_lif_rxq_stats(lif, q_num, stats); } + if (lif->hwstamp_txq) + ionic_add_lif_txq_stats(lif, lif->hwstamp_txq->q.index, stats); + + if (lif->hwstamp_rxq) + ionic_add_lif_rxq_stats(lif, lif->hwstamp_rxq->q.index, stats); + ionic_get_stats64(lif->netdev, &ns); stats->hw_tx_dropped = ns.tx_dropped; stats->hw_rx_dropped = ns.rx_dropped; @@ -214,30 +239,30 @@ static void ionic_get_lif_stats(struct ionic_lif *lif, static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) { - u64 total = 0; - - /* lif stats */ - total += IONIC_NUM_LIF_STATS; + u64 total = 0, tx_queues = MAX_Q(lif), rx_queues = MAX_Q(lif); - /* tx stats */ - total += MAX_Q(lif) * 
IONIC_NUM_TX_STATS; + if (lif->hwstamp_txq) + tx_queues += 1; - /* rx stats */ - total += MAX_Q(lif) * IONIC_NUM_RX_STATS; + if (lif->hwstamp_rxq) + rx_queues += 1; - /* port stats */ + total += IONIC_NUM_LIF_STATS; total += IONIC_NUM_PORT_STATS; + total += tx_queues * IONIC_NUM_TX_STATS; + total += rx_queues * IONIC_NUM_RX_STATS; + if (test_bit(IONIC_LIF_F_UP, lif->state) && test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { /* tx debug stats */ - total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + + total += tx_queues * (IONIC_NUM_DBG_CQ_STATS + IONIC_NUM_TX_Q_STATS + IONIC_NUM_DBG_INTR_STATS + IONIC_MAX_NUM_SG_CNTR); /* rx debug stats */ - total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + + total += rx_queues * (IONIC_NUM_DBG_CQ_STATS + IONIC_NUM_DBG_INTR_STATS + IONIC_NUM_DBG_NAPI_STATS + IONIC_MAX_NUM_NAPI_CNTR); @@ -246,97 +271,167 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) return total; } +static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf, + int q_num) +{ + int i; + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) + ethtool_sprintf(buf, "tx_%d_%s", q_num, + ionic_tx_stats_desc[i].name); + + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) + ethtool_sprintf(buf, "txq_%d_%s", q_num, + ionic_txq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) + ethtool_sprintf(buf, "txq_%d_cq_%s", q_num, + ionic_dbg_cq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) + ethtool_sprintf(buf, "txq_%d_intr_%s", q_num, + ionic_dbg_intr_stats_desc[i].name); + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) + ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i); +} + +static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, + int q_num) +{ + int i; + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) + ethtool_sprintf(buf, "rx_%d_%s", q_num, + ionic_rx_stats_desc[i].name); + + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num, + ionic_dbg_cq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num, + ionic_dbg_intr_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num, + ionic_dbg_napi_stats_desc[i].name); + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) + ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i); +} + static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) { int i, q_num; - for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) + ethtool_sprintf(buf, ionic_lif_stats_desc[i].name); + + for (i = 0; i < IONIC_NUM_PORT_STATS; i++) + ethtool_sprintf(buf, ionic_port_stats_desc[i].name); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_tx_strings(lif, buf, q_num); + + if (lif->hwstamp_txq) + ionic_sw_stats_get_tx_strings(lif, buf, lif->hwstamp_txq->q.index); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_rx_strings(lif, buf, q_num); + + if (lif->hwstamp_rxq) + ionic_sw_stats_get_rx_strings(lif, buf, lif->hwstamp_rxq->q.index); +} + +static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf, + int q_num) +{ + struct ionic_tx_stats *txstats; + struct 
ionic_qcq *txqcq; + int i; + + txstats = &lif->txqstats[q_num]; + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) { + **buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]); + (*buf)++; } - for (i = 0; i < IONIC_NUM_PORT_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - ionic_port_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + txqcq = lif->txqcqs[q_num]; + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->q, + &ionic_txq_stats_desc[i]); + (*buf)++; } + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->napi_stats, + &ionic_dbg_napi_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + **buf = txqcq->napi_stats.work_done_cntr[i]; + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { + **buf = txstats->sg_cntr[i]; + (*buf)++; + } +} - for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - for (i = 0; i < IONIC_NUM_TX_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s", - q_num, ionic_tx_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_%s", - q_num, - ionic_txq_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_cq_%s", - q_num, - ionic_dbg_cq_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_intr_%s", - q_num, - ionic_dbg_intr_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_sg_cntr_%d", - q_num, i); - *buf += ETH_GSTRING_LEN; - } - } +static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf, + int q_num) +{ + struct ionic_rx_stats *rxstats; + struct ionic_qcq *rxqcq; + int i; + + rxstats = &lif->rxqstats[q_num]; + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) { + **buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]); + (*buf)++; } - for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - for (i = 0; i < IONIC_NUM_RX_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rx_%d_%s", - q_num, ionic_rx_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_cq_%s", - q_num, - ionic_dbg_cq_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_intr_%s", - q_num, - ionic_dbg_intr_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_napi_%s", - q_num, - ionic_dbg_napi_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_napi_work_done_%d", - q_num, i); - *buf += 
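The stats-strings helpers above replace the open-coded snprintf sequences with ethtool_sprintf(), which writes one fixed-width ETH_GSTRING_LEN slot and advances the caller's cursor. A sketch of that slot-advancing pattern, with EX_GSTRING_LEN standing in for the real constant:

#include <stdarg.h>
#include <stdio.h>

#define EX_GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN */

static void ex_sprintf(char **buf, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*buf, EX_GSTRING_LEN, fmt, args);
	va_end(args);

	*buf += EX_GSTRING_LEN;		/* advance to the next slot */
}

int main(void)
{
	char table[4 * EX_GSTRING_LEN] = { 0 };
	char *cur = table;
	int q;

	for (q = 0; q < 4; q++)
		ex_sprintf(&cur, "tx_%d_pkts", q);

	printf("%s %s\n", table, table + EX_GSTRING_LEN);
	return 0;
}
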
ETH_GSTRING_LEN; - } - } + + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + rxqcq = lif->rxqcqs[q_num]; + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, + &ionic_dbg_napi_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + **buf = rxqcq->napi_stats.work_done_cntr[i]; + (*buf)++; } } @@ -344,9 +439,6 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) { struct ionic_port_stats *port_stats; struct ionic_lif_sw_stats lif_stats; - struct ionic_qcq *txqcq, *rxqcq; - struct ionic_tx_stats *txstats; - struct ionic_rx_stats *rxstats; int i, q_num; ionic_get_lif_stats(lif, &lif_stats); @@ -363,73 +455,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) (*buf)++; } - for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - txstats = &lif->txqstats[q_num]; - - for (i = 0; i < IONIC_NUM_TX_STATS; i++) { - **buf = IONIC_READ_STAT64(txstats, - &ionic_tx_stats_desc[i]); - (*buf)++; - } - - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - txqcq = lif->txqcqs[q_num]; - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->q, - &ionic_txq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->cq, - &ionic_dbg_cq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->intr, - &ionic_dbg_intr_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { - **buf = txstats->sg_cntr[i]; - (*buf)++; - } - } - } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_txq_values(lif, buf, q_num); - for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - rxstats = &lif->rxqstats[q_num]; - - for (i = 0; i < IONIC_NUM_RX_STATS; i++) { - **buf = IONIC_READ_STAT64(rxstats, - &ionic_rx_stats_desc[i]); - (*buf)++; - } - - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - rxqcq = lif->rxqcqs[q_num]; - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->cq, - &ionic_dbg_cq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->intr, - &ionic_dbg_intr_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, - &ionic_dbg_napi_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { - **buf = rxqcq->napi_stats.work_done_cntr[i]; - (*buf)++; - } - } - } + if (lif->hwstamp_txq) + ionic_sw_stats_get_txq_values(lif, buf, lif->hwstamp_txq->q.index); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_rxq_values(lif, buf, q_num); + + if (lif->hwstamp_rxq) + ionic_sw_stats_get_rxq_values(lif, buf, lif->hwstamp_rxq->q.index); } const struct ionic_stats_group_intf ionic_stats_groups[] = { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 4087311f7082..08934888575c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ 
b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -10,14 +10,6 @@ #include "ionic_lif.h" #include "ionic_txrx.h" -static void ionic_rx_clean(struct ionic_queue *q, - struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info, - void *cb_arg); - -static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); - -static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) @@ -40,72 +32,149 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) return netdev_get_tx_queue(q->lif->netdev, q->index); } -static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, - unsigned int len, bool frags) +static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info) +{ + buf_info->page = NULL; + buf_info->page_offset = 0; + buf_info->dma_addr = 0; +} + +static int ionic_rx_page_alloc(struct ionic_queue *q, + struct ionic_buf_info *buf_info) { - struct ionic_lif *lif = q->lif; + struct net_device *netdev = q->lif->netdev; struct ionic_rx_stats *stats; - struct net_device *netdev; - struct sk_buff *skb; + struct device *dev; - netdev = lif->netdev; - stats = &q->lif->rxqstats[q->index]; + dev = q->dev; + stats = q_to_rx_stats(q); - if (frags) - skb = napi_get_frags(&q_to_qcq(q)->napi); - else - skb = netdev_alloc_skb_ip_align(netdev, len); + if (unlikely(!buf_info)) { + net_err_ratelimited("%s: %s invalid buf_info in alloc\n", + netdev->name, q->name); + return -EINVAL; + } - if (unlikely(!skb)) { - net_warn_ratelimited("%s: SKB alloc failed on %s!\n", - netdev->name, q->name); + buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); + if (unlikely(!buf_info->page)) { + net_err_ratelimited("%s: %s page alloc failed\n", + netdev->name, q->name); stats->alloc_err++; - return NULL; + return -ENOMEM; } + buf_info->page_offset = 0; - return skb; + buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { + __free_pages(buf_info->page, 0); + ionic_rx_buf_reset(buf_info); + net_err_ratelimited("%s: %s dma map failed\n", + netdev->name, q->name); + stats->dma_map_err++; + return -EIO; + } + + return 0; +} + +static void ionic_rx_page_free(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + struct net_device *netdev = q->lif->netdev; + struct device *dev = q->dev; + + if (unlikely(!buf_info)) { + net_err_ratelimited("%s: %s invalid buf_info in free\n", + netdev->name, q->name); + return; + } + + if (!buf_info->page) + return; + + dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(buf_info->page, 0); + ionic_rx_buf_reset(buf_info); +} + +static bool ionic_rx_buf_recycle(struct ionic_queue *q, + struct ionic_buf_info *buf_info, u32 used) +{ + u32 size; + + /* don't re-use pages allocated in low-mem condition */ + if (page_is_pfmemalloc(buf_info->page)) + return false; + + /* don't re-use buffers from non-local numa nodes */ + if (page_to_nid(buf_info->page) != numa_mem_id()) + return false; + + size = ALIGN(used, IONIC_PAGE_SPLIT_SZ); + buf_info->page_offset += size; + if (buf_info->page_offset >= IONIC_PAGE_SIZE) + return false; + + get_page(buf_info->page); + + return true; } static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info) + struct ionic_rxq_comp *comp) { - struct ionic_rxq_comp *comp = 
cq_info->cq_desc; - struct device *dev = q->lif->ionic->dev; - struct ionic_page_info *page_info; + struct net_device *netdev = q->lif->netdev; + struct ionic_buf_info *buf_info; + struct ionic_rx_stats *stats; + struct device *dev = q->dev; struct sk_buff *skb; unsigned int i; u16 frag_len; u16 len; - page_info = &desc_info->pages[0]; + stats = q_to_rx_stats(q); + + buf_info = &desc_info->bufs[0]; len = le16_to_cpu(comp->len); - prefetch(page_address(page_info->page) + NET_IP_ALIGN); + prefetch(buf_info->page); - skb = ionic_rx_skb_alloc(q, len, true); - if (unlikely(!skb)) + skb = napi_get_frags(&q_to_qcq(q)->napi); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; return NULL; + } i = comp->num_sg_elems + 1; do { - if (unlikely(!page_info->page)) { - struct napi_struct *napi = &q_to_qcq(q)->napi; - - napi->skb = NULL; + if (unlikely(!buf_info->page)) { dev_kfree_skb(skb); return NULL; } - frag_len = min(len, (u16)PAGE_SIZE); + frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); len -= frag_len; - dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr), - PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(dev, + buf_info->dma_addr + buf_info->page_offset, + frag_len, DMA_FROM_DEVICE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page_info->page, 0, frag_len, PAGE_SIZE); - page_info->page = NULL; - page_info++; + buf_info->page, buf_info->page_offset, frag_len, + IONIC_PAGE_SIZE); + + if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { + dma_unmap_page(dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + ionic_rx_buf_reset(buf_info); + } + + buf_info++; + i--; } while (i > 0); @@ -114,30 +183,37 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info) + struct ionic_rxq_comp *comp) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; - struct device *dev = q->lif->ionic->dev; - struct ionic_page_info *page_info; + struct net_device *netdev = q->lif->netdev; + struct ionic_buf_info *buf_info; + struct ionic_rx_stats *stats; + struct device *dev = q->dev; struct sk_buff *skb; u16 len; - page_info = &desc_info->pages[0]; + stats = q_to_rx_stats(q); + + buf_info = &desc_info->bufs[0]; len = le16_to_cpu(comp->len); - skb = ionic_rx_skb_alloc(q, len, false); - if (unlikely(!skb)) + skb = napi_alloc_skb(&q_to_qcq(q)->napi, len); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; return NULL; + } - if (unlikely(!page_info->page)) { + if (unlikely(!buf_info->page)) { dev_kfree_skb(skb); return NULL; } - dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr), + dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset, len, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(page_info->page), len); - dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr), + skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len); + dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset, len, DMA_FROM_DEVICE); skb_put(skb, len); @@ -151,14 +227,15 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_cq_info *cq_info, void *cb_arg) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct net_device *netdev = q->lif->netdev; struct ionic_qcq *qcq = q_to_qcq(q); struct 
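ionic_rx_buf_recycle() above decides whether a receive page can serve another fragment by advancing a sub-page offset in aligned steps. A simplified sketch of that page-splitting test, with EX_PAGE_SIZE and EX_PAGE_SPLIT_SZ standing in for the driver's constants (the driver additionally rejects pfmemalloc pages and non-local NUMA pages before this check):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	 4096u
#define EX_PAGE_SPLIT_SZ 2048u
#define EX_ALIGN(x, a)	 (((x) + (a) - 1) & ~((a) - 1))

struct ex_buf {
	uint32_t page_offset;	/* next unused offset within the page */
};

/* returns true while the page still has room for another fragment */
static bool ex_buf_recycle(struct ex_buf *buf, uint32_t used)
{
	buf->page_offset += EX_ALIGN(used, EX_PAGE_SPLIT_SZ);
	return buf->page_offset < EX_PAGE_SIZE;
}

int main(void)
{
	struct ex_buf buf = { 0 };

	/* a 1500-byte frame consumes one 2048-byte split; the second fills
	 * the page, so the buffer must then be unmapped and refilled
	 */
	printf("%d %d\n", ex_buf_recycle(&buf, 1500),
	       ex_buf_recycle(&buf, 1500));
	return 0;
}
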
ionic_rx_stats *stats; - struct net_device *netdev; + struct ionic_rxq_comp *comp; struct sk_buff *skb; + comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp); + stats = q_to_rx_stats(q); - netdev = q->lif->netdev; if (comp->status) { stats->dropped++; @@ -169,9 +246,9 @@ static void ionic_rx_clean(struct ionic_queue *q, stats->bytes += le16_to_cpu(comp->len); if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) - skb = ionic_rx_copybreak(q, desc_info, cq_info); + skb = ionic_rx_copybreak(q, desc_info, comp); else - skb = ionic_rx_frags(q, desc_info, cq_info); + skb = ionic_rx_frags(q, desc_info, comp); if (unlikely(!skb)) { stats->dropped++; @@ -219,17 +296,39 @@ static void ionic_rx_clean(struct ionic_queue *q, stats->vlan_stripped++; } + if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) { + __le64 *cq_desc_hwstamp; + u64 hwstamp; + + cq_desc_hwstamp = + cq_info->cq_desc + + qcq->cq.desc_size - + sizeof(struct ionic_rxq_comp) - + IONIC_HWSTAMP_CQ_NEGOFFSET; + + hwstamp = le64_to_cpu(*cq_desc_hwstamp); + + if (hwstamp != IONIC_HWSTAMP_INVALID) { + skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp); + stats->hwstamp_valid++; + } else { + stats->hwstamp_invalid++; + } + } + if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) napi_gro_receive(&qcq->napi, skb); else napi_gro_frags(&qcq->napi); } -static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) +bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; + struct ionic_rxq_comp *comp; + + comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); if (!color_match(comp->pkt_type_color, cq->done_color)) return false; @@ -253,138 +352,75 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) return true; } -static int ionic_rx_page_alloc(struct ionic_queue *q, - struct ionic_page_info *page_info) -{ - struct ionic_lif *lif = q->lif; - struct ionic_rx_stats *stats; - struct net_device *netdev; - struct device *dev; - - netdev = lif->netdev; - dev = lif->ionic->dev; - stats = q_to_rx_stats(q); - - if (unlikely(!page_info)) { - net_err_ratelimited("%s: %s invalid page_info in alloc\n", - netdev->name, q->name); - return -EINVAL; - } - - page_info->page = dev_alloc_page(); - if (unlikely(!page_info->page)) { - net_err_ratelimited("%s: %s page alloc failed\n", - netdev->name, q->name); - stats->alloc_err++; - return -ENOMEM; - } - - page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) { - put_page(page_info->page); - page_info->dma_addr = 0; - page_info->page = NULL; - net_err_ratelimited("%s: %s dma map failed\n", - netdev->name, q->name); - stats->dma_map_err++; - return -EIO; - } - - return 0; -} - -static void ionic_rx_page_free(struct ionic_queue *q, - struct ionic_page_info *page_info) -{ - struct ionic_lif *lif = q->lif; - struct net_device *netdev; - struct device *dev; - - netdev = lif->netdev; - dev = lif->ionic->dev; - - if (unlikely(!page_info)) { - net_err_ratelimited("%s: %s invalid page_info in free\n", - netdev->name, q->name); - return; - } - - if (unlikely(!page_info->page)) { - net_err_ratelimited("%s: %s invalid page in free\n", - netdev->name, q->name); - return; - } - - dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); - - put_page(page_info->page); - page_info->dma_addr = 0; - page_info->page = 
NULL; -} - void ionic_rx_fill(struct ionic_queue *q) { struct net_device *netdev = q->lif->netdev; struct ionic_desc_info *desc_info; - struct ionic_page_info *page_info; struct ionic_rxq_sg_desc *sg_desc; struct ionic_rxq_sg_elem *sg_elem; + struct ionic_buf_info *buf_info; struct ionic_rxq_desc *desc; unsigned int remain_len; - unsigned int seg_len; + unsigned int frag_len; unsigned int nfrags; unsigned int i, j; unsigned int len; len = netdev->mtu + ETH_HLEN + VLAN_HLEN; - nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE; for (i = ionic_q_space_avail(q); i; i--) { + nfrags = 0; remain_len = len; desc_info = &q->info[q->head_idx]; desc = desc_info->desc; - sg_desc = desc_info->sg_desc; - page_info = &desc_info->pages[0]; + buf_info = &desc_info->bufs[0]; - if (page_info->page) { /* recycle the buffer */ - ionic_rxq_post(q, false, ionic_rx_clean, NULL); - continue; - } - - /* fill main descriptor - pages[0] */ - desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG : - IONIC_RXQ_DESC_OPCODE_SIMPLE; - desc_info->npages = nfrags; - if (unlikely(ionic_rx_page_alloc(q, page_info))) { - desc->addr = 0; - desc->len = 0; - return; + if (!buf_info->page) { /* alloc a new buffer? */ + if (unlikely(ionic_rx_page_alloc(q, buf_info))) { + desc->addr = 0; + desc->len = 0; + return; + } } - desc->addr = cpu_to_le64(page_info->dma_addr); - seg_len = min_t(unsigned int, PAGE_SIZE, len); - desc->len = cpu_to_le16(seg_len); - remain_len -= seg_len; - page_info++; - /* fill sg descriptors - pages[1..n] */ - for (j = 0; j < nfrags - 1; j++) { - if (page_info->page) /* recycle the sg buffer */ - continue; + /* fill main descriptor - buf[0] */ + desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); + frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); + desc->len = cpu_to_le16(frag_len); + remain_len -= frag_len; + buf_info++; + nfrags++; + /* fill sg descriptors - buf[1..n] */ + sg_desc = desc_info->sg_desc; + for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) { sg_elem = &sg_desc->elems[j]; - if (unlikely(ionic_rx_page_alloc(q, page_info))) { - sg_elem->addr = 0; - sg_elem->len = 0; - return; + if (!buf_info->page) { /* alloc a new sg buffer? */ + if (unlikely(ionic_rx_page_alloc(q, buf_info))) { + sg_elem->addr = 0; + sg_elem->len = 0; + return; + } } - sg_elem->addr = cpu_to_le64(page_info->dma_addr); - seg_len = min_t(unsigned int, PAGE_SIZE, remain_len); - sg_elem->len = cpu_to_le16(seg_len); - remain_len -= seg_len; - page_info++; + + sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); + frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset); + sg_elem->len = cpu_to_le16(frag_len); + remain_len -= frag_len; + buf_info++; + nfrags++; } + /* clear end sg element as a sentinel */ + if (j < q->max_sg_elems) { + sg_elem = &sg_desc->elems[j]; + memset(sg_elem, 0, sizeof(*sg_elem)); + } + + desc->opcode = (nfrags > 1) ? 
IONIC_RXQ_DESC_OPCODE_SG : + IONIC_RXQ_DESC_OPCODE_SIMPLE; + desc_info->nbufs = nfrags; + ionic_rxq_post(q, false, ionic_rx_clean, NULL); } @@ -395,21 +431,24 @@ void ionic_rx_fill(struct ionic_queue *q) void ionic_rx_empty(struct ionic_queue *q) { struct ionic_desc_info *desc_info; - struct ionic_page_info *page_info; + struct ionic_buf_info *buf_info; unsigned int i, j; for (i = 0; i < q->num_descs; i++) { desc_info = &q->info[i]; for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { - page_info = &desc_info->pages[j]; - if (page_info->page) - ionic_rx_page_free(q, page_info); + buf_info = &desc_info->bufs[j]; + if (buf_info->page) + ionic_rx_page_free(q, buf_info); } - desc_info->npages = 0; + desc_info->nbufs = 0; desc_info->cb = NULL; desc_info->cb_arg = NULL; } + + q->head_idx = 0; + q->tail_idx = 0; } static void ionic_dim_update(struct ionic_qcq *qcq) @@ -525,7 +564,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) idev = &lif->ionic->idev; txcq = &lif->txqcqs[qi]->cq; - tx_work_done = ionic_cq_service(txcq, lif->tx_budget, + tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, ionic_tx_service, NULL, NULL); rx_work_done = ionic_cq_service(rxcq, budget, @@ -558,7 +597,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; + struct device *dev = q->dev; dma_addr_t dma_addr; dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); @@ -576,7 +615,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, size_t offset, size_t len) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; + struct device *dev = q->dev; dma_addr_t dma_addr; dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); @@ -588,62 +627,130 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, return dma_addr; } +static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) +{ + struct ionic_buf_info *buf_info = desc_info->bufs; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->dev; + dma_addr_t dma_addr; + unsigned int nfrags; + skb_frag_t *frag; + int frag_idx; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) { + stats->dma_map_err++; + return -EIO; + } + buf_info->dma_addr = dma_addr; + buf_info->len = skb_headlen(skb); + buf_info++; + + frag = skb_shinfo(skb)->frags; + nfrags = skb_shinfo(skb)->nr_frags; + for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { + dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); + if (dma_mapping_error(dev, dma_addr)) { + stats->dma_map_err++; + goto dma_fail; + } + buf_info->dma_addr = dma_addr; + buf_info->len = skb_frag_size(frag); + buf_info++; + } + + desc_info->nbufs = 1 + nfrags; + + return 0; + +dma_fail: + /* unwind the frag mappings and the head mapping */ + while (frag_idx > 0) { + frag_idx--; + buf_info--; + dma_unmap_page(dev, buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + } + dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE); + return -EIO; +} + static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, struct ionic_cq_info *cq_info, void *cb_arg) { - struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc; - struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_tx_stats *stats = 
q_to_tx_stats(q); - struct ionic_txq_desc *desc = desc_info->desc; - struct device *dev = q->lif->ionic->dev; - u8 opcode, flags, nsge; - u16 queue_index; + struct ionic_qcq *qcq = q_to_qcq(q); + struct sk_buff *skb = cb_arg; + struct device *dev = q->dev; unsigned int i; - u64 addr; + u16 qi; + + if (desc_info->nbufs) { + dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + buf_info++; + for (i = 1; i < desc_info->nbufs; i++, buf_info++) + dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + } - decode_txq_desc_cmd(le64_to_cpu(desc->cmd), - &opcode, &flags, &nsge, &addr); + if (!skb) + return; - /* use unmap_single only if either this is not TSO, - * or this is first descriptor of a TSO - */ - if (opcode != IONIC_TXQ_DESC_OPCODE_TSO || - flags & IONIC_TXQ_DESC_FLAG_TSO_SOT) - dma_unmap_single(dev, (dma_addr_t)addr, - le16_to_cpu(desc->len), DMA_TO_DEVICE); - else - dma_unmap_page(dev, (dma_addr_t)addr, - le16_to_cpu(desc->len), DMA_TO_DEVICE); - - for (i = 0; i < nsge; i++, elem++) - dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr), - le16_to_cpu(elem->len), DMA_TO_DEVICE); - - if (cb_arg) { - struct sk_buff *skb = cb_arg; - u32 len = skb->len; - - queue_index = skb_get_queue_mapping(skb); - if (unlikely(__netif_subqueue_stopped(q->lif->netdev, - queue_index))) { - netif_wake_subqueue(q->lif->netdev, queue_index); - q->wake++; + qi = skb_get_queue_mapping(skb); + + if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) { + if (cq_info) { + struct skb_shared_hwtstamps hwts = {}; + __le64 *cq_desc_hwstamp; + u64 hwstamp; + + cq_desc_hwstamp = + cq_info->cq_desc + + qcq->cq.desc_size - + sizeof(struct ionic_txq_comp) - + IONIC_HWSTAMP_CQ_NEGOFFSET; + + hwstamp = le64_to_cpu(*cq_desc_hwstamp); + + if (hwstamp != IONIC_HWSTAMP_INVALID) { + hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp); + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_tstamp_tx(skb, &hwts); + + stats->hwstamp_valid++; + } else { + stats->hwstamp_invalid++; + } } - dev_kfree_skb_any(skb); - stats->clean++; - netdev_tx_completed_queue(q_to_ndq(q), 1, len); + + } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) { + netif_wake_subqueue(q->lif->netdev, qi); + q->wake++; } + + desc_info->bytes = skb->len; + stats->clean++; + + dev_consume_skb_any(skb); } -static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) +bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { - struct ionic_txq_comp *comp = cq_info->cq_desc; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; + struct ionic_txq_comp *comp; + int bytes = 0; + int pkts = 0; u16 index; + comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); + if (!color_match(comp->color, cq->done_color)) return false; @@ -652,13 +759,21 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) */ do { desc_info = &q->info[q->tail_idx]; + desc_info->bytes = 0; index = q->tail_idx; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg); + if (desc_info->cb_arg) { + pkts++; + bytes += desc_info->bytes; + } desc_info->cb = NULL; desc_info->cb_arg = NULL; } while (index != le16_to_cpu(comp->comp_index)); + if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); + return true; } @@ -677,15 +792,25 @@ void ionic_tx_flush(struct ionic_cq *cq) void ionic_tx_empty(struct ionic_queue *q) { 
struct ionic_desc_info *desc_info; + int bytes = 0; + int pkts = 0; /* walk the not completed tx entries, if any */ while (q->head_idx != q->tail_idx) { desc_info = &q->info[q->tail_idx]; + desc_info->bytes = 0; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); + if (desc_info->cb_arg) { + pkts++; + bytes += desc_info->bytes; + } desc_info->cb = NULL; desc_info->cb_arg = NULL; } + + if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); } static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) @@ -756,50 +881,34 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc desc->hdr_len = cpu_to_le16(hdrlen); desc->mss = cpu_to_le16(mss); - if (done) { + if (start) { skb_tx_timestamp(skb); - netdev_tx_sent_queue(q_to_ndq(q), skb->len); - ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); + if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_sent_queue(q_to_ndq(q), skb->len); + ionic_txq_post(q, false, ionic_tx_clean, skb); } else { - ionic_txq_post(q, false, ionic_tx_clean, NULL); + ionic_txq_post(q, done, NULL, NULL); } } -static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, - struct ionic_txq_sg_elem **elem) -{ - struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; - struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; - - *elem = sg_desc->elems; - return desc; -} - static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_desc_info *rewind_desc_info; - struct device *dev = q->lif->ionic->dev; + struct ionic_desc_info *desc_info; + struct ionic_buf_info *buf_info; struct ionic_txq_sg_elem *elem; struct ionic_txq_desc *desc; - unsigned int frag_left = 0; - unsigned int offset = 0; - u16 abort = q->head_idx; - unsigned int len_left; + unsigned int chunk_len; + unsigned int frag_rem; + unsigned int tso_rem; + unsigned int seg_rem; dma_addr_t desc_addr; + dma_addr_t frag_addr; unsigned int hdrlen; - unsigned int nfrags; - unsigned int seglen; - u64 total_bytes = 0; - u64 total_pkts = 0; - u16 rewind = abort; - unsigned int left; unsigned int len; unsigned int mss; - skb_frag_t *frag; bool start, done; bool outer_csum; - dma_addr_t addr; bool has_vlan; u16 desc_len; u8 desc_nsge; @@ -807,9 +916,14 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) bool encap; int err; + desc_info = &q->info[q->head_idx]; + buf_info = desc_info->bufs; + + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) + return -EIO; + + len = skb->len; mss = skb_shinfo(skb)->gso_size; - nfrags = skb_shinfo(skb)->nr_frags; - len_left = skb->len - skb_headlen(skb); outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) || (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); has_vlan = !!skb_vlan_tag_present(skb); @@ -834,125 +948,75 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) else hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); - seglen = hdrlen + mss; - left = skb_headlen(skb); + tso_rem = len; + seg_rem = min(tso_rem, hdrlen + mss); - desc = ionic_tx_tso_next(q, &elem); - start = true; + frag_addr = 0; + frag_rem = 0; - /* Chop skb->data up into desc segments */ + start = true; - while (left > 0) { - len = min(seglen, left); - frag_left = seglen - len; - desc_addr = ionic_tx_map_single(q, skb->data + offset, len); - if (dma_mapping_error(dev, desc_addr)) - goto err_out_abort; 
- desc_len = len; + while (tso_rem > 0) { + desc = NULL; + elem = NULL; + desc_addr = 0; + desc_len = 0; desc_nsge = 0; - left -= len; - offset += len; - if (nfrags > 0 && frag_left > 0) - continue; - done = (nfrags == 0 && left == 0); - ionic_tx_tso_post(q, desc, skb, - desc_addr, desc_nsge, desc_len, - hdrlen, mss, - outer_csum, - vlan_tci, has_vlan, - start, done); - total_pkts++; - total_bytes += start ? len : len + hdrlen; - desc = ionic_tx_tso_next(q, &elem); - start = false; - seglen = mss; - } - - /* Chop skb frags into desc segments */ - - for (frag = skb_shinfo(skb)->frags; len_left; frag++) { - offset = 0; - left = skb_frag_size(frag); - len_left -= left; - nfrags--; - stats->frags++; - - while (left > 0) { - if (frag_left > 0) { - len = min(frag_left, left); - frag_left -= len; - addr = ionic_tx_map_frag(q, frag, offset, len); - if (dma_mapping_error(dev, addr)) - goto err_out_abort; - elem->addr = cpu_to_le64(addr); - elem->len = cpu_to_le16(len); + /* use fragments until we have enough to post a single descriptor */ + while (seg_rem > 0) { + /* if the fragment is exhausted then move to the next one */ + if (frag_rem == 0) { + /* grab the next fragment */ + frag_addr = buf_info->dma_addr; + frag_rem = buf_info->len; + buf_info++; + } + chunk_len = min(frag_rem, seg_rem); + if (!desc) { + /* fill main descriptor */ + desc = desc_info->txq_desc; + elem = desc_info->txq_sg_desc->elems; + desc_addr = frag_addr; + desc_len = chunk_len; + } else { + /* fill sg descriptor */ + elem->addr = cpu_to_le64(frag_addr); + elem->len = cpu_to_le16(chunk_len); elem++; desc_nsge++; - left -= len; - offset += len; - if (nfrags > 0 && frag_left > 0) - continue; - done = (nfrags == 0 && left == 0); - ionic_tx_tso_post(q, desc, skb, desc_addr, - desc_nsge, desc_len, - hdrlen, mss, outer_csum, - vlan_tci, has_vlan, - start, done); - total_pkts++; - total_bytes += start ? len : len + hdrlen; - desc = ionic_tx_tso_next(q, &elem); - start = false; - } else { - len = min(mss, left); - frag_left = mss - len; - desc_addr = ionic_tx_map_frag(q, frag, - offset, len); - if (dma_mapping_error(dev, desc_addr)) - goto err_out_abort; - desc_len = len; - desc_nsge = 0; - left -= len; - offset += len; - if (nfrags > 0 && frag_left > 0) - continue; - done = (nfrags == 0 && left == 0); - ionic_tx_tso_post(q, desc, skb, desc_addr, - desc_nsge, desc_len, - hdrlen, mss, outer_csum, - vlan_tci, has_vlan, - start, done); - total_pkts++; - total_bytes += start ? 
len : len + hdrlen; - desc = ionic_tx_tso_next(q, &elem); - start = false; } + frag_addr += chunk_len; + frag_rem -= chunk_len; + tso_rem -= chunk_len; + seg_rem -= chunk_len; } + seg_rem = min(tso_rem, mss); + done = (tso_rem == 0); + /* post descriptor */ + ionic_tx_tso_post(q, desc, skb, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, outer_csum, vlan_tci, has_vlan, + start, done); + start = false; + /* Buffer information is stored with the first tso descriptor */ + desc_info = &q->info[q->head_idx]; + desc_info->nbufs = 0; } - stats->pkts += total_pkts; - stats->bytes += total_bytes; + stats->pkts += DIV_ROUND_UP(len - hdrlen, mss); + stats->bytes += len; stats->tso++; - stats->tso_bytes += total_bytes; + stats->tso_bytes = len; return 0; - -err_out_abort: - while (rewind != q->head_idx) { - rewind_desc_info = &q->info[rewind]; - ionic_tx_clean(q, rewind_desc_info, NULL, NULL); - rewind = (rewind + 1) & (q->num_descs - 1); - } - q->head_idx = abort; - - return -ENOMEM; } -static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) +static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) { - struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; + struct ionic_txq_desc *desc = desc_info->txq_desc; + struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; - dma_addr_t dma_addr; bool has_vlan; u8 flags = 0; bool encap; @@ -961,23 +1025,22 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) has_vlan = !!skb_vlan_tag_present(skb); encap = skb->encapsulation; - dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); - if (dma_mapping_error(dev, dma_addr)) - return -ENOMEM; - flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, - flags, skb_shinfo(skb)->nr_frags, dma_addr); + flags, skb_shinfo(skb)->nr_frags, + buf_info->dma_addr); desc->cmd = cpu_to_le64(cmd); - desc->len = cpu_to_le16(skb_headlen(skb)); - desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); - desc->csum_offset = cpu_to_le16(skb->csum_offset); + desc->len = cpu_to_le16(buf_info->len); if (has_vlan) { desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); stats->vlan_inserted++; + } else { + desc->vlan_tci = 0; } + desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); + desc->csum_offset = cpu_to_le16(skb->csum_offset); if (skb_csum_is_sctp(skb)) stats->crc32_csum++; @@ -987,12 +1050,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) return 0; } -static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) +static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) { - struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; + struct ionic_txq_desc *desc = desc_info->txq_desc; + struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; - dma_addr_t dma_addr; bool has_vlan; u8 flags = 0; bool encap; @@ -1001,67 +1064,66 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) has_vlan = !!skb_vlan_tag_present(skb); encap = skb->encapsulation; - dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); - if (dma_mapping_error(dev, dma_addr)) - return -ENOMEM; - flags |= has_vlan ? 
IONIC_TXQ_DESC_FLAG_VLAN : 0; flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, - flags, skb_shinfo(skb)->nr_frags, dma_addr); + flags, skb_shinfo(skb)->nr_frags, + buf_info->dma_addr); desc->cmd = cpu_to_le64(cmd); - desc->len = cpu_to_le16(skb_headlen(skb)); + desc->len = cpu_to_le16(buf_info->len); if (has_vlan) { desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); stats->vlan_inserted++; + } else { + desc->vlan_tci = 0; } + desc->csum_start = 0; + desc->csum_offset = 0; stats->csum_none++; return 0; } -static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) +static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) { - struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; - unsigned int len_left = skb->len - skb_headlen(skb); + struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc; + struct ionic_buf_info *buf_info = &desc_info->bufs[1]; struct ionic_txq_sg_elem *elem = sg_desc->elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; - dma_addr_t dma_addr; - skb_frag_t *frag; - u16 len; + unsigned int i; - for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) { - len = skb_frag_size(frag); - elem->len = cpu_to_le16(len); - dma_addr = ionic_tx_map_frag(q, frag, 0, len); - if (dma_mapping_error(dev, dma_addr)) - return -ENOMEM; - elem->addr = cpu_to_le64(dma_addr); - len_left -= len; - stats->frags++; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { + elem->addr = cpu_to_le64(buf_info->dma_addr); + elem->len = cpu_to_le16(buf_info->len); } + stats->frags += skb_shinfo(skb)->nr_frags; + return 0; } static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) { + struct ionic_desc_info *desc_info = &q->info[q->head_idx]; struct ionic_tx_stats *stats = q_to_tx_stats(q); int err; + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) + return -EIO; + /* set up the initial descriptor */ if (skb->ip_summed == CHECKSUM_PARTIAL) - err = ionic_tx_calc_csum(q, skb); + err = ionic_tx_calc_csum(q, skb, desc_info); else - err = ionic_tx_calc_no_csum(q, skb); + err = ionic_tx_calc_no_csum(q, skb, desc_info); if (err) return err; /* add frags */ - err = ionic_tx_skb_frags(q, skb); + err = ionic_tx_skb_frags(q, skb, desc_info); if (err) return err; @@ -1069,7 +1131,8 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) stats->pkts++; stats->bytes += skb->len; - netdev_tx_sent_queue(q_to_ndq(q), skb->len); + if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) + netdev_tx_sent_queue(q_to_ndq(q), skb->len); ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); return 0; @@ -1077,7 +1140,6 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) { - int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); int ndescs; int err; @@ -1088,7 +1150,8 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) else ndescs = 1; - if (skb_shinfo(skb)->nr_frags <= sg_elems) + /* If non-TSO, just need 1 desc and nr_frags sg elems */ + if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) return ndescs; /* Too many frags, so linearize */ @@ -1121,6 +1184,42 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) return stopped; } +static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb, + struct 
net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue *q = &lif->hwstamp_txq->q; + int err, ndescs; + + /* Does not stop/start txq, because we post to a separate tx queue + * for timestamping, and if a packet can't be posted immediately to + * the timestamping queue, it is dropped. + */ + + ndescs = ionic_tx_descs_needed(q, skb); + if (unlikely(ndescs < 0)) + goto err_out_drop; + + if (unlikely(!ionic_q_has_space(q, ndescs))) + goto err_out_drop; + + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP; + if (skb_is_gso(skb)) + err = ionic_tx_tso(q, skb); + else + err = ionic_tx(q, skb); + + if (err) + goto err_out_drop; + + return NETDEV_TX_OK; + +err_out_drop: + q->drop++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) { u16 queue_index = skb_get_queue_mapping(skb); @@ -1134,6 +1233,10 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode) + return ionic_start_hwstamp_xmit(skb, netdev); + if (unlikely(queue_index >= lif->nxqs)) queue_index = 0; q = &lif->txqcqs[queue_index]->q; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h index 7667b72232b8..d7cbaad8a6fb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -14,4 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget); int ionic_txrx_napi(struct napi_struct *napi, int budget); netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); +bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); + #endif /* _IONIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 07824bf9d68d..dfaf10edfabf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -396,6 +396,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, tpa_param->tpa_ipv6_en_flg = 1; tpa_param->tpa_pkt_split_flg = 1; tpa_param->tpa_gro_consistent_flg = 1; + break; default: break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b8dc5c4591ef..ed2b6fe5a78d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4734,6 +4734,7 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn) */ link.speed = (hwfn->cdev->num_hwfns > 1) ? 
100000 : 40000; + break; default: /* In auto mode pass PF link image to VF */ break; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 8c47a9d2a965..8e150dd4f899 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -345,7 +345,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames, struct qede_tx_queue *xdp_tx; struct xdp_frame *xdpf; dma_addr_t mapping; - int i, drops = 0; + int i, nxmit = 0; u16 xdp_prod; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -364,18 +364,13 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames, mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dmadev, mapping))) { - xdp_return_frame_rx_napi(xdpf); - drops++; - - continue; - } + if (unlikely(dma_mapping_error(dmadev, mapping))) + break; if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, - NULL, xdpf))) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + NULL, xdpf))) + break; + nxmit++; } if (flags & XDP_XMIT_FLUSH) { @@ -387,7 +382,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames, spin_unlock(&xdp_tx->xdp_tx_lock); - return n_frames - drops; + return nxmit; } int qede_txq_has_work(struct qede_tx_queue *txq) @@ -1214,12 +1209,9 @@ static int qede_rx_build_jumbo(struct qede_dev *edev, dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, DMA_FROM_DEVICE); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - bd->data, rxq->rx_headroom, cur_size); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data, + rxq->rx_headroom, cur_size, PAGE_SIZE); - skb->truesize += PAGE_SIZE; - skb->data_len += cur_size; - skb->len += cur_size; pkt_len -= cur_size; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4d952036ba82..01ac1e93d27a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -7,7 +7,6 @@ #include <linux/crash_dump.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/version.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 117188e3c7de..87b8c032195d 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt, { struct emac_tpd tpd; u32 prod_idx; + int len; memset(&tpd, 0, sizeof(tpd)); @@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt, if (skb_network_offset(skb) != ETH_HLEN) TPD_TYP_SET(&tpd, 1); + len = skb->len; emac_tx_fill_tpd(adpt, tx_q, skb, &tpd); - netdev_sent_queue(adpt->netdev, skb->len); + netdev_sent_queue(adpt->netdev, len); /* Make sure the are enough free descriptors to hold one * maximum-sized SKB. 
We need one desc for each fragment, diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5a3b65a6eb4f..ab9b02574a15 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -885,7 +885,7 @@ qca_spi_probe(struct spi_device *spi) struct net_device *qcaspi_devs = NULL; u8 legacy_mode = 0; u16 signature; - const char *mac; + int ret; if (!spi->dev.of_node) { dev_err(&spi->dev, "Missing device tree\n"); @@ -962,12 +962,8 @@ qca_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, qcaspi_devs); - mac = of_get_mac_address(spi->dev.of_node); - - if (!IS_ERR(mac)) - ether_addr_copy(qca->net_dev->dev_addr, mac); - - if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { + ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr); + if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&spi->dev, "Using random MAC address: %pM\n", qca->net_dev->dev_addr); diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index 362b4f5c162c..bcdeca7b3366 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -323,7 +323,6 @@ static int qca_uart_probe(struct serdev_device *serdev) { struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart)); struct qcauart *qca; - const char *mac; u32 speed = 115200; int ret; @@ -348,12 +347,8 @@ static int qca_uart_probe(struct serdev_device *serdev) of_property_read_u32(serdev->dev.of_node, "current-speed", &speed); - mac = of_get_mac_address(serdev->dev.of_node); - - if (!IS_ERR(mac)) - ether_addr_copy(qca->net_dev->dev_addr, mac); - - if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { + ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr); + if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&serdev->dev, "Using random MAC address: %pM\n", qca->net_dev->dev_addr); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 3d00b3232308..0be5ac7ab261 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -56,20 +56,22 @@ static void __rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_endpoint *ep; u16 len, pad; u8 mux_id; - if (RMNET_MAP_GET_CD_BIT(skb)) { + if (map_header->flags & MAP_CMD_FLAG) { + /* Packet contains a MAP command (not data) */ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) return rmnet_map_command(skb, port); goto free_skb; } - mux_id = RMNET_MAP_GET_MUX_ID(skb); - pad = RMNET_MAP_GET_PAD(skb); - len = RMNET_MAP_GET_LENGTH(skb) - pad; + mux_id = map_header->mux_id; + pad = map_header->flags & MAP_PAD_LEN_MASK; + len = ntohs(map_header->pkt_len) - pad; if (mux_id >= RMNET_MAX_LOGICAL_EP) goto free_skb; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 576501db2a0b..2aea153f4247 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -32,18 +32,6 @@ enum rmnet_map_commands { RMNET_MAP_COMMAND_ENUM_LENGTH }; -#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ - (Y)->data)->mux_id) -#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ - (Y)->data)->cd_bit) -#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \ - (Y)->data)->pad_len) -#define 
RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \ - ((Y)->data + \ - sizeof(struct rmnet_map_header))) -#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \ - (Y)->data)->pkt_len)) - #define RMNET_MAP_COMMAND_REQUEST 0 #define RMNET_MAP_COMMAND_ACK 1 #define RMNET_MAP_COMMAND_UNSUPPORTED 2 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index beaee4962128..add0f5ade2e6 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -12,12 +12,13 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, struct rmnet_port *port, int enable) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_endpoint *ep; struct net_device *vnd; u8 mux_id; int r; - mux_id = RMNET_MAP_GET_MUX_ID(skb); + mux_id = map_header->mux_id; if (mux_id >= RMNET_MAX_LOGICAL_EP) { kfree_skb(skb); @@ -49,6 +50,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, unsigned char type, struct rmnet_port *port) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_map_control_command *cmd; struct net_device *dev = skb->dev; @@ -58,7 +60,8 @@ static void rmnet_map_send_ack(struct sk_buff *skb, skb->protocol = htons(ETH_P_MAP); - cmd = RMNET_MAP_GET_CMD_START(skb); + /* Command data immediately follows the MAP header */ + cmd = (struct rmnet_map_control_command *)(map_header + 1); cmd->cmd_type = type & 0x03; netif_tx_lock(dev); @@ -71,11 +74,13 @@ static void rmnet_map_send_ack(struct sk_buff *skb, */ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_map_control_command *cmd; unsigned char command_name; unsigned char rc = 0; - cmd = RMNET_MAP_GET_CMD_START(skb); + /* Command data immediately follows the MAP header */ + cmd = (struct rmnet_map_control_command *)(map_header + 1); command_name = cmd->command_name; switch (command_name) { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 21d38167f961..0ac2ff828320 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -197,22 +197,16 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr, struct rmnet_map_ul_csum_header *ul_header, struct sk_buff *skb) { - struct iphdr *ip4h = (struct iphdr *)iphdr; - __be16 *hdr = (__be16 *)ul_header, offset; + struct iphdr *ip4h = iphdr; + u16 val; - offset = htons((__force u16)(skb_transport_header(skb) - - (unsigned char *)iphdr)); - ul_header->csum_start_offset = offset; - ul_header->csum_insert_offset = skb->csum_offset; - ul_header->csum_enabled = 1; + val = MAP_CSUM_UL_ENABLED_FLAG; if (ip4h->protocol == IPPROTO_UDP) - ul_header->udp_ind = 1; - else - ul_header->udp_ind = 0; + val |= MAP_CSUM_UL_UDP_FLAG; + val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK; - /* Changing remaining fields to network order */ - hdr++; - *hdr = htons((__force u16)*hdr); + ul_header->csum_start_offset = htons(skb_network_header_len(skb)); + ul_header->csum_info = htons(val); skb->ip_summed = CHECKSUM_NONE; @@ -239,23 +233,16 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, struct rmnet_map_ul_csum_header *ul_header, struct sk_buff *skb) { - struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; - __be16 *hdr = (__be16 *)ul_header, offset; - - offset = htons((__force u16)(skb_transport_header(skb) - - (unsigned 
char *)ip6hdr)); - ul_header->csum_start_offset = offset; - ul_header->csum_insert_offset = skb->csum_offset; - ul_header->csum_enabled = 1; + struct ipv6hdr *ip6h = ip6hdr; + u16 val; + val = MAP_CSUM_UL_ENABLED_FLAG; if (ip6h->nexthdr == IPPROTO_UDP) - ul_header->udp_ind = 1; - else - ul_header->udp_ind = 0; + val |= MAP_CSUM_UL_UDP_FLAG; + val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK; - /* Changing remaining fields to network order */ - hdr++; - *hdr = htons((__force u16)*hdr); + ul_header->csum_start_offset = htons(skb_network_header_len(skb)); + ul_header->csum_info = htons(val); skb->ip_summed = CHECKSUM_NONE; @@ -284,6 +271,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, return map_header; } + BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3); padding = ALIGN(map_datalen, 4) - map_datalen; if (padding == 0) @@ -297,7 +285,8 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, done: map_header->pkt_len = htons(map_datalen + padding); - map_header->pad_len = padding & 0x3F; + /* This is a data packet, so the CMD bit is 0 */ + map_header->flags = padding & MAP_PAD_LEN_MASK; return map_header; } @@ -319,7 +308,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, return NULL; maph = (struct rmnet_map_header *)skb->data; - packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + packet_len = ntohs(maph->pkt_len) + sizeof(*maph); if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) packet_len += sizeof(struct rmnet_map_dl_csum_trailer); @@ -328,7 +317,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, return NULL; /* Some hardware can send us empty frames. Catch them */ - if (ntohs(maph->pkt_len) == 0) + if (!maph->pkt_len) return NULL; skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); @@ -361,7 +350,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); - if (!csum_trailer->valid) { + if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) { priv->stats.csum_valid_unset++; return -EINVAL; } @@ -421,10 +410,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, } sw_csum: - ul_header->csum_start_offset = 0; - ul_header->csum_insert_offset = 0; - ul_header->csum_enabled = 0; - ul_header->udp_ind = 0; + memset(ul_header, 0, sizeof(*ul_header)); priv->stats.csum_sw++; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 1df2c002c9f6..3e86fbe21431 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1586,12 +1586,10 @@ DECLARE_RTL_COND(rtl_counters_cond) static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) { - dma_addr_t paddr = tp->counters_phys_addr; - u32 cmd; + u32 cmd = lower_32_bits(tp->counters_phys_addr); - RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); rtl_pci_commit(tp); - cmd = (u64)paddr & DMA_BIT_MASK(32); RTL_W32(tp, CounterAddrLow, cmd); RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); @@ -1903,6 +1901,41 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) return ret; } +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct 
net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, @@ -1923,6 +1956,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .set_eee = rtl8169_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, }; static void rtl_enable_eee(struct rtl8169_private *tp) @@ -2352,11 +2388,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp) pcie_set_readrq(tp->pci_dev, readrq); /* Chip doesn't support pause in jumbo mode */ - linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, - tp->phydev->advertising, !jumbo); - linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - tp->phydev->advertising, !jumbo); - phy_start_aneg(tp->phydev); + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } } DECLARE_RTL_COND(rtl_chipcmd_cond) @@ -2735,11 +2773,6 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) -{ - rtl_hw_start_8168c_2(tp); -} - static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { rtl_set_def_aspm_entry_latency(tp); @@ -3652,7 +3685,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, - [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, @@ -4662,6 +4695,7 @@ static void rtl8169_down(struct rtl8169_private *tp) static void rtl8169_up(struct rtl8169_private *tp) { pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); phy_resume(tp->phydev); rtl8169_init_phy(tp); napi_enable(&tp->napi); @@ -5087,6 +5121,10 @@ static int r8169_mdio_register(struct rtl8169_private *tp) return -EUNATCH; } + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + /* PHY will be woken up in rtl_open() */ phy_suspend(tp->phydev); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index cb47e68c1a3e..86a1eb0634e8 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -993,6 +993,7 @@ struct ravb_private { struct platform_device *pdev; void __iomem *addr; struct clk *clk; + struct clk *refclk; struct mdiobb_ctrl mdiobb; u32 num_rx_ring[NUM_RX_QUEUE]; u32 num_tx_ring[NUM_TX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 
eb0c03bdb12d..4afff320dfd0 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -109,11 +109,13 @@ static void ravb_set_buffer_align(struct sk_buff *skb) * Ethernet AVB device doesn't have ROM for MAC address. * This function gets the MAC address that was used by a bootloader. */ -static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) +static void ravb_read_mac_address(struct device_node *np, + struct net_device *ndev) { - if (!IS_ERR(mac)) { - ether_addr_copy(ndev->dev_addr, mac); - } else { + int ret; + + ret = of_get_mac_address(np, ndev->dev_addr); + if (ret) { u32 mahr = ravb_read(ndev, MAHR); u32 malr = ravb_read(ndev, MALR); @@ -911,31 +913,20 @@ static int ravb_poll(struct napi_struct *napi, int budget) int q = napi - priv->napi; int mask = BIT(q); int quota = budget; - u32 ris0, tis; - for (;;) { - tis = ravb_read(ndev, TIS); - ris0 = ravb_read(ndev, RIS0); - if (!((ris0 & mask) || (tis & mask))) - break; + /* Processing RX Descriptor Ring */ + /* Clear RX interrupt */ + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); + if (ravb_rx(ndev, &quota, q)) + goto out; - /* Processing RX Descriptor Ring */ - if (ris0 & mask) { - /* Clear RX interrupt */ - ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - if (ravb_rx(ndev, &quota, q)) - goto out; - } - /* Processing TX Descriptor Ring */ - if (tis & mask) { - spin_lock_irqsave(&priv->lock, flags); - /* Clear TX interrupt */ - ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); - ravb_tx_free(ndev, q, true); - netif_wake_subqueue(ndev, q); - spin_unlock_irqrestore(&priv->lock, flags); - } - } + /* Processing TX Descriptor Ring */ + spin_lock_irqsave(&priv->lock, flags); + /* Clear TX interrupt */ + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); + ravb_tx_free(ndev, q, true); + netif_wake_subqueue(ndev, q); + spin_unlock_irqrestore(&priv->lock, flags); napi_complete(napi); @@ -2148,6 +2139,13 @@ static int ravb_probe(struct platform_device *pdev) goto out_release; } + priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); + if (IS_ERR(priv->refclk)) { + error = PTR_ERR(priv->refclk); + goto out_release; + } + clk_prepare_enable(priv->refclk); + ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; @@ -2164,7 +2162,7 @@ static int ravb_probe(struct platform_device *pdev) /* Set GTI value */ error = ravb_set_gti(ndev); if (error) - goto out_release; + goto out_disable_refclk; /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); @@ -2183,7 +2181,7 @@ static int ravb_probe(struct platform_device *pdev) "Cannot allocate desc base address table (size %d bytes)\n", priv->desc_bat_size); error = -ENOMEM; - goto out_release; + goto out_disable_refclk; } for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) priv->desc_bat[q].die_dt = DT_EOS; @@ -2200,7 +2198,7 @@ static int ravb_probe(struct platform_device *pdev) priv->msg_enable = RAVB_DEF_MSG_ENABLE; /* Read and set MAC address */ - ravb_read_mac_address(ndev, of_get_mac_address(np)); + ravb_read_mac_address(np, ndev); if (!is_valid_ether_addr(ndev->dev_addr)) { dev_warn(&pdev->dev, "no valid MAC address supplied, using a random one\n"); @@ -2243,6 +2241,8 @@ out_dma_free: /* Stop PTP Clock driver */ if (chip_id != RCAR_GEN2) ravb_ptp_stop(ndev); +out_disable_refclk: + clk_disable_unprepare(priv->refclk); out_release: free_netdev(ndev); @@ -2260,6 +2260,8 @@ static int ravb_remove(struct platform_device *pdev) if (priv->chip_id != RCAR_GEN2) ravb_ptp_stop(ndev); +
clk_disable_unprepare(priv->refclk); + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); /* Set reset mode */ diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f029c7c03804..c5b154868c1f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -560,7 +560,7 @@ static struct sh_eth_cpu_data r7s72100_data = { EESR_TDE, .fdr_value = 0x0000070f, - .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE, .no_psr = 1, .apr = 1, @@ -701,7 +701,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = { EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, - .trscer_err_mask = DESC_I_RINT8, + .trscer_err_mask = TRSCER_RMAFCE, .apr = 1, .mpr = 1, @@ -782,7 +782,7 @@ static struct sh_eth_cpu_data r7s9210_data = { .fdr_value = 0x0000070f, - .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE, .apr = 1, .mpr = 1, @@ -1094,7 +1094,7 @@ static struct sh_eth_cpu_data sh771x_data = { EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .trscer_err_mask = DESC_I_RINT8, + .trscer_err_mask = TRSCER_RMAFCE, .tsu = 1, .dual_port = 1, @@ -1749,7 +1749,7 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) link_stat = sh_eth_read(ndev, PSR); if (mdp->ether_link_active_low) link_stat = ~link_stat; - if (!(link_stat & PHY_ST_LINK)) { + if (!(link_stat & PSR_LMON)) { sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ @@ -3170,7 +3170,6 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) struct device_node *np = dev->of_node; struct sh_eth_plat_data *pdata; phy_interface_t interface; - const char *mac_addr; int ret; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); @@ -3182,9 +3181,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) return NULL; pdata->phy_interface = interface; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(pdata->mac_addr, mac_addr); + of_get_mac_address(np, pdata->mac_addr); pdata->no_ether_link = of_property_read_bool(np, "renesas,no-ether-link"); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index c1b3751b12c4..a5c07c6ff44a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -171,7 +171,7 @@ enum GECMR_BIT { }; /* EDMR */ -enum DMAC_M_BIT { +enum EDMR_BIT { EDMR_NBST = 0x80, EDMR_EL = 0x40, /* Litte endian */ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, @@ -180,13 +180,13 @@ enum DMAC_M_BIT { }; /* EDTRR */ -enum DMAC_T_BIT { +enum EDTRR_BIT { EDTRR_TRNS_GETHER = 0x03, EDTRR_TRNS_ETHER = 0x01, }; /* EDRRR */ -enum EDRRR_R_BIT { +enum EDRRR_BIT { EDRRR_R = 0x01, }; @@ -208,7 +208,7 @@ enum PIR_BIT { }; /* PSR */ -enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, }; +enum PSR_BIT { PSR_LMON = 0x01, }; /* EESR */ enum EESR_BIT { @@ -288,27 +288,6 @@ enum EESIPR_BIT { EESIPR_CERFIP = 0x00000001, }; -/* Receive descriptor 0 bits */ -enum RD_STS_BIT { - RD_RACT = 0x80000000, RD_RDLE = 0x40000000, - RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000, - RD_RFE = 0x08000000, RD_RFS10 = 0x00000200, - RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080, - RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020, - RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008, - RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002, - RD_RFS1 = 0x00000001, -}; -#define RDF1ST RD_RFP1 -#define RDFEND RD_RFP0 -#define RD_RFP (RD_RFP1|RD_RFP0) - -/* Receive 
descriptor 1 bits */ -enum RD_LEN_BIT { - RD_RFL = 0x0000ffff, /* receive frame length */ - RD_RBL = 0xffff0000, /* receive buffer length */ -}; - /* FCFTR */ enum FCFTR_BIT { FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, @@ -318,28 +297,13 @@ enum FCFTR_BIT { #define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) #define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) -/* Transmit descriptor 0 bits */ -enum TD_STS_BIT { - TD_TACT = 0x80000000, TD_TDLE = 0x40000000, - TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, - TD_TFE = 0x08000000, TD_TWBI = 0x04000000, -}; -#define TDF1ST TD_TFP1 -#define TDFEND TD_TFP0 -#define TD_TFP (TD_TFP1|TD_TFP0) - -/* Transmit descriptor 1 bits */ -enum TD_LEN_BIT { - TD_TBL = 0xffff0000, /* transmit buffer length */ -}; - /* RMCR */ enum RMCR_BIT { RMCR_RNC = 0x00000001, }; /* ECMR */ -enum FELIC_MODE_BIT { +enum ECMR_BIT { ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000, ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, @@ -350,7 +314,7 @@ enum FELIC_MODE_BIT { }; /* ECSR */ -enum ECSR_STATUS_BIT { +enum ECSR_BIT { ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, ECSR_LCHNG = 0x04, ECSR_MPD = 0x02, ECSR_ICD = 0x01, @@ -360,7 +324,7 @@ enum ECSR_STATUS_BIT { ECSR_ICD | ECSIPR_MPDIP) /* ECSIPR */ -enum ECSIPR_STATUS_MASK_BIT { +enum ECSIPR_BIT { ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04, ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01, @@ -380,14 +344,20 @@ enum MPR_BIT { }; /* TRSCER */ -enum DESC_I_BIT { - DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200, - DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010, - DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002, - DESC_I_RINT1 = 0x0001, +enum TRSCER_BIT { + TRSCER_CNDCE = 0x00000800, + TRSCER_DLCCE = 0x00000400, + TRSCER_CDCE = 0x00000200, + TRSCER_TROCE = 0x00000100, + TRSCER_RMAFCE = 0x00000080, + TRSCER_RRFCE = 0x00000010, + TRSCER_RTLFCE = 0x00000008, + TRSCER_RTSFCE = 0x00000004, + TRSCER_PRECE = 0x00000002, + TRSCER_CERFCE = 0x00000001, }; -#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) +#define DEFAULT_TRSCER_ERR_MASK (TRSCER_RMAFCE | TRSCER_RRFCE | TRSCER_CDCE) /* RPADIR */ enum RPADIR_BIT { @@ -445,6 +415,24 @@ struct sh_eth_txdesc { u32 pad0; /* padding data */ } __aligned(2) __packed; +/* Transmit descriptor 0 bits */ +enum TD_STS_BIT { + TD_TACT = 0x80000000, + TD_TDLE = 0x40000000, + TD_TFP1 = 0x20000000, + TD_TFP0 = 0x10000000, + TD_TFE = 0x08000000, + TD_TWBI = 0x04000000, +}; +#define TDF1ST TD_TFP1 +#define TDFEND TD_TFP0 +#define TD_TFP (TD_TFP1 | TD_TFP0) + +/* Transmit descriptor 1 bits */ +enum TD_LEN_BIT { + TD_TBL = 0xffff0000, /* transmit buffer length */ +}; + /* The sh ether Rx buffer descriptors. * This structure should be 20 bytes. 
*/ @@ -455,6 +443,34 @@ struct sh_eth_rxdesc { u32 pad0; /* padding data */ } __aligned(2) __packed; +/* Receive descriptor 0 bits */ +enum RD_STS_BIT { + RD_RACT = 0x80000000, + RD_RDLE = 0x40000000, + RD_RFP1 = 0x20000000, + RD_RFP0 = 0x10000000, + RD_RFE = 0x08000000, + RD_RFS10 = 0x00000200, + RD_RFS9 = 0x00000100, + RD_RFS8 = 0x00000080, + RD_RFS7 = 0x00000040, + RD_RFS6 = 0x00000020, + RD_RFS5 = 0x00000010, + RD_RFS4 = 0x00000008, + RD_RFS3 = 0x00000004, + RD_RFS2 = 0x00000002, + RD_RFS1 = 0x00000001, +}; +#define RDF1ST RD_RFP1 +#define RDFEND RD_RFP0 +#define RD_RFP (RD_RFP1 | RD_RFP0) + +/* Receive descriptor 1 bits */ +enum RD_LEN_BIT { + RD_RFL = 0x0000ffff, /* receive frame length */ + RD_RBL = 0xffff0000, /* receive buffer length */ +}; + /* This structure is used by each CPU dependency handling. */ struct sh_eth_cpu_data { /* mandatory functions */ diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 3473d296b2e2..a46633606cae 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2736,7 +2736,7 @@ static void rocker_switchdev_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = rocker_world_port_fdb_add(rocker_port, fdb_info); if (err) { @@ -2747,7 +2747,7 @@ static void rocker_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = rocker_world_port_fdb_del(rocker_port, fdb_info); if (err) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 33f79402850d..4639ed9438a3 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -25,8 +25,7 @@ #ifdef CONFIG_OF static int sxgbe_probe_config_dt(struct platform_device *pdev, - struct sxgbe_plat_data *plat, - const char **mac) + struct sxgbe_plat_data *plat) { struct device_node *np = pdev->dev.of_node; struct sxgbe_dma_cfg *dma_cfg; @@ -35,7 +34,6 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, if (!np) return -ENODEV; - *mac = of_get_mac_address(np); err = of_get_phy_mode(np, &plat->interface); if (err && err != -ENODEV) return err; @@ -63,8 +61,7 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, } #else static int sxgbe_probe_config_dt(struct platform_device *pdev, - struct sxgbe_plat_data *plat, - const char **mac) + struct sxgbe_plat_data *plat) { return -ENOSYS; } @@ -85,7 +82,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) void __iomem *addr; struct sxgbe_priv_data *priv = NULL; struct sxgbe_plat_data *plat_dat = NULL; - const char *mac = NULL; struct net_device *ndev = platform_get_drvdata(pdev); struct device_node *node = dev->of_node; @@ -101,7 +97,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) if (!plat_dat) return -ENOMEM; - ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac); + ret = sxgbe_probe_config_dt(pdev, plat_dat); if (ret) { pr_err("%s: main dt probe failed\n", __func__); return ret; @@ -122,8 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) } /* Get MAC address if available (DT) */ - if (!IS_ERR_OR_NULL(mac)) - 
ether_addr_copy(priv->dev->dev_addr, mac); + of_get_mac_address(node, priv->dev->dev_addr); /* Get the TX/RX IRQ numbers */ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index da6886dcac37..c3f35da1b82a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1747,6 +1747,22 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) mask, names); } +static void efx_ef10_get_fec_stats(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + + efx_ef10_get_stat_mask(efx, mask); + if (test_bit(EF10_STAT_fec_corrected_errors, mask)) + fec_stats->corrected_blocks.total = + stats[EF10_STAT_fec_corrected_errors]; + if (test_bit(EF10_STAT_fec_uncorrected_errors, mask)) + fec_stats->uncorrectable_blocks.total = + stats[EF10_STAT_fec_uncorrected_errors]; +} + static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats) { @@ -2928,8 +2944,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) /* Get the transmit queue */ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); - tx_queue = efx_channel_get_tx_queue(channel, - tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); if (!tx_queue->timestamping) { /* Transmit completion */ @@ -4122,6 +4137,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .get_wol = efx_ef10_get_wol, .set_wol = efx_ef10_set_wol, .resume_wol = efx_port_dummy_op_void, + .get_fec_stats = efx_ef10_get_fec_stats, .test_chip = efx_ef10_test_chip, .test_nvram = efx_mcdi_nvram_test_all, .mcdi_request = efx_ef10_mcdi_request, diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 1bfeee283ea9..a3ca406a3561 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx) } } } + if (xdp_queue_number) + efx->xdp_tx_queue_count = xdp_queue_number; rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 3332cdf2918a..cd590e0685e5 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -78,7 +78,6 @@ enum efx_loopback_mode { (1 << LOOPBACK_XAUI) | \ (1 << LOOPBACK_GMII) | \ (1 << LOOPBACK_SGMII) | \ - (1 << LOOPBACK_SGMII) | \ (1 << LOOPBACK_XGBR) | \ (1 << LOOPBACK_XFI) | \ (1 << LOOPBACK_XAUI_FAR) | \ diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 12a91c559aa2..058d9fe41d99 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -206,6 +206,15 @@ static int efx_ethtool_set_wol(struct net_device *net_dev, return efx->type->set_wol(efx, wol->wolopts); } +static void efx_ethtool_get_fec_stats(struct net_device *net_dev, + struct ethtool_fec_stats *fec_stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_fec_stats) + efx->type->get_fec_stats(efx, fec_stats); +} + static int efx_ethtool_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) { @@ -257,6 +266,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_module_eeprom = efx_ethtool_get_module_eeprom, .get_link_ksettings = 
efx_ethtool_get_link_ksettings, .set_link_ksettings = efx_ethtool_set_link_ksettings, + .get_fec_stats = efx_ethtool_get_fec_stats, .get_fecparam = efx_ethtool_get_fecparam, .set_fecparam = efx_ethtool_set_fecparam, }; diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index a529ff395ead..a381cf9ec4f3 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -637,7 +637,7 @@ union ef4_multicast_hash { * struct ef4_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) * @pci_dev: The PCI device - * @node: List node for maintaning primary/secondary function lists + * @node: List node for maintaining primary/secondary function lists * @primary: &struct ef4_nic instance for the primary function of this * controller. May be the same structure, and may be %NULL if no * primary function is bound. Serialised by rtnl_lock. diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index d75cf5ff5686..49df02ecee91 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) /* Transmit completion */ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); - tx_queue = efx_channel_get_tx_queue( - channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + tx_queue = channel->tx_queue + + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); efx_xmit_done(tx_queue, tx_ev_desc_ptr); } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { /* Rewrite the FIFO write pointer */ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); - tx_queue = efx_channel_get_tx_queue( - channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + tx_queue = channel->tx_queue + + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); netif_tx_lock(efx->net_dev); efx_farch_notify_tx_desc(tx_queue); @@ -1081,16 +1081,16 @@ static void efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_tx_queue *tx_queue; + struct efx_channel *channel; int qid; qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) { - tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL, - qid % EFX_MAX_TXQ_PER_CHANNEL); - if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { + channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL); + tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL); + if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) efx_farch_magic_event(tx_queue->channel, EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); - } } } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9f7dfdf708cf..9b4b25704271 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1187,6 +1187,7 @@ struct efx_udp_tunnel { * @get_wol: Get WoL configuration from driver state * @set_wol: Push WoL configuration to the NIC * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) + * @get_fec_stats: Get standard FEC statistics. * @test_chip: Test registers. May use efx_farch_test_registers(), and is * expected to reset the NIC. 
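The get_fec_stats hook added above (in efx_nic_type and the ethtool op table) feeds the standard FEC block counters of the ethtool netlink API. Below is a minimal, hedged sketch of such a callback: read_hw_fec_counter() is a made-up placeholder for however a given NIC exposes its counters, and fields the hardware does not count are left untouched so the ethtool core can report them as unavailable.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Placeholder for the hardware-specific counter source (not a real API). */
extern u64 read_hw_fec_counter(struct net_device *net_dev, int which);

static void example_get_fec_stats(struct net_device *net_dev,
				  struct ethtool_fec_stats *fec_stats)
{
	/* Only fill what the hardware actually counts. */
	fec_stats->corrected_blocks.total = read_hw_fec_counter(net_dev, 0);
	fec_stats->uncorrectable_blocks.total = read_hw_fec_counter(net_dev, 1);
}

Once wired into struct ethtool_ops as .get_fec_stats, recent ethtool binaries can typically query the counters through the FEC netlink request, e.g. "ethtool --include-statistics --show-fec <iface>".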
* @test_nvram: Test validity of NVRAM contents @@ -1332,6 +1333,8 @@ struct efx_nic_type { void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); int (*set_wol)(struct efx_nic *efx, u32 type); void (*resume_wol)(struct efx_nic *efx); + void (*get_fec_stats)(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats); unsigned int (*check_caps)(const struct efx_nic *efx, u8 flag, u32 offset); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 89c5c75f479f..17b8119c48e5 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -94,12 +94,11 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, rx_buf->len -= hdr_len; for (;;) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buf->page, rx_buf->page_offset, - rx_buf->len); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len, efx->rx_buffer_truesize); rx_buf->page = NULL; - skb->len += rx_buf->len; - skb->data_len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) break; @@ -111,8 +110,6 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, n_frags = 0; } - skb->truesize += n_frags * efx->rx_buffer_truesize; - /* Move past the ethernet header */ skb->protocol = eth_type_trans(skb, efx->net_dev); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 1665529a7271..0c6650d2e239 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -412,14 +412,6 @@ err: return NETDEV_TX_OK; } -static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs) -{ - int i; - - for (i = 0; i < n; i++) - xdp_return_frame_rx_napi(xdpfs[i]); -} - /* Transmit a packet from an XDP buffer * * Returns number of packets sent on success, error code otherwise. @@ -492,12 +484,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (flush && i > 0) efx_nic_push_buffers(tx_queue); - if (i == 0) - return -EIO; - - efx_xdp_return_frames(n - i, xdpfs + i); - - return i; + return i == 0 ? -EIO : i; } /* Initiate a packet transmission. We use one channel per CPU diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 891b49281bc6..cbde83f620a0 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2204,7 +2204,7 @@ static int try_toggle_control_gpio(struct device *dev, const char *name, int index, int value, unsigned int nsdelay) { - struct gpio_desc *gpio = *desc; + struct gpio_desc *gpio; enum gpiod_flags flags = value ? 
GPIOD_OUT_LOW : GPIOD_OUT_HIGH; gpio = devm_gpiod_get_index_optional(dev, name, index, flags); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 606c79de93a6..556a9790cdcf 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2649,11 +2649,13 @@ static const struct of_device_id smsc911x_dt_ids[] = { MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); #endif +#ifdef CONFIG_ACPI static const struct acpi_device_id smsc911x_acpi_match[] = { { "ARMH9118", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match); +#endif static struct platform_driver smsc911x_driver = { .probe = smsc911x_drv_probe, diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 200785e703c8..dfc85cc68173 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1760,8 +1760,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n, { struct netsec_priv *priv = netdev_priv(ndev); struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; - int drops = 0; - int i; + int i, nxmit = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -1772,12 +1771,11 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n, int err; err = netsec_xdp_queue_one(priv, xdpf, true); - if (err != NETSEC_XDP_TX) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } else { - tx_ring->xdp_xmit++; - } + if (err != NETSEC_XDP_TX) + break; + + tx_ring->xdp_xmit++; + nxmit++; } spin_unlock(&tx_ring->lock); @@ -1786,7 +1784,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n, tx_ring->xdp_xmit = 0; } - return n - drops; + return nxmit; } static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog, diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 501b9c7aba56..fcbb4bb31408 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1559,7 +1559,6 @@ static int ave_probe(struct platform_device *pdev) struct ave_private *priv; struct net_device *ndev; struct device_node *np; - const void *mac_addr; void __iomem *base; const char *name; int i, irq, ret; @@ -1600,12 +1599,9 @@ static int ave_probe(struct platform_device *pdev) ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - - /* if the mac address is invalid, use random mac address */ - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(np, ndev->dev_addr); + if (ret) { + /* if the mac address is invalid, use random mac address */ eth_hw_addr_random(ndev); dev_warn(dev, "Using random MAC address: %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 366740ab9c5a..f2e478b884b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ + stmmac_xdp.o \ $(stmmac-y) stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 6f271c46368d..619e3c0760d6 
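Several hunks in this range (sxgbe and sni_ave above) move from the old of_get_mac_address() that returned a pointer or ERR_PTR to the newer form that writes straight into a caller-supplied buffer and returns an error code. A hedged sketch of the resulting probe-time pattern, including the fallback these drivers use:

#include <linux/of_net.h>
#include <linux/etherdevice.h>

/* Sketch only: 'np' and 'ndev' come from the surrounding probe code. */
static void example_assign_mac(struct device_node *np, struct net_device *ndev)
{
	/* A non-zero return means no usable address in DT/NVMEM. */
	if (of_get_mac_address(np, ndev->dev_addr))
		eth_hw_addr_random(ndev);
}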
100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -33,6 +33,7 @@ #define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 #define DWMAC_CORE_5_10 0x51 +#define DWMAC_CORE_5_20 0x52 #define DWXGMAC_CORE_2_10 0x21 #define DWXLGMAC_CORE_2_00 0x20 @@ -182,6 +183,12 @@ struct stmmac_extra_stats { /* TSO */ unsigned long tx_tso_frames; unsigned long tx_tso_nfrags; + /* EST */ + unsigned long mtl_est_cgce; + unsigned long mtl_est_hlbs; + unsigned long mtl_est_hlbf; + unsigned long mtl_est_btre; + unsigned long mtl_est_btrlm; }; /* Safety Feature statistics exposed by ethtool */ @@ -253,6 +260,9 @@ struct stmmac_safety_stats { #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ #define DEFAULT_DMA_PBL 8 +/* MSI defines */ +#define STMMAC_MSI_VEC_MAX 32 + /* PCS status and mask defines */ #define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ #define PCS_LINK_IRQ BIT(1) /* PCS Link */ @@ -303,12 +313,37 @@ enum dma_irq_status { handle_tx = 0x8, }; +enum dma_irq_dir { + DMA_DIR_RX = 0x1, + DMA_DIR_TX = 0x2, + DMA_DIR_RXTX = 0x3, +}; + +enum request_irq_err { + REQ_IRQ_ERR_ALL, + REQ_IRQ_ERR_TX, + REQ_IRQ_ERR_RX, + REQ_IRQ_ERR_SFTY_UE, + REQ_IRQ_ERR_SFTY_CE, + REQ_IRQ_ERR_LPI, + REQ_IRQ_ERR_WOL, + REQ_IRQ_ERR_MAC, + REQ_IRQ_ERR_NO, +}; + /* EEE and LPI defines */ #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) +/* FPE defines */ +#define FPE_EVENT_UNKNOWN 0 +#define FPE_EVENT_TRSP BIT(0) +#define FPE_EVENT_TVER BIT(1) +#define FPE_EVENT_RRSP BIT(2) +#define FPE_EVENT_RVER BIT(3) + #define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ @@ -382,6 +417,8 @@ struct dma_features { unsigned int estsel; unsigned int fpesel; unsigned int tbssel; + /* Numbers of Auxiliary Snapshot Inputs */ + unsigned int aux_snapshot_n; }; /* RX Buffer size must be multiple of 4/8/16 bytes */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 08c76636c164..dfbaea06d108 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) if (IS_ERR(gmac)) return PTR_ERR(gmac); - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 27254b27d7ed..bc91fd867dcd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -438,7 +438,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(stmmac_res.addr)) return PTR_ERR(stmmac_res.addr); - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index fad503820e04..fbfda55b4c52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) return 
ret; if (pdev->dev.of_node) { - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 223f69da7e95..84651207a1de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -90,6 +90,32 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) return ret; } +static int imx_dwmac_clks_config(void *priv, bool enabled) +{ + struct imx_priv_data *dwmac = priv; + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(dwmac->clk_mem); + if (ret) { + dev_err(dwmac->dev, "mem clock enable failed\n"); + return ret; + } + + ret = clk_prepare_enable(dwmac->clk_tx); + if (ret) { + dev_err(dwmac->dev, "tx clock enable failed\n"); + clk_disable_unprepare(dwmac->clk_mem); + return ret; + } + } else { + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_mem); + } + + return ret; +} + static int imx_dwmac_init(struct platform_device *pdev, void *priv) { struct plat_stmmacenet_data *plat_dat; @@ -98,39 +124,18 @@ static int imx_dwmac_init(struct platform_device *pdev, void *priv) plat_dat = dwmac->plat_dat; - ret = clk_prepare_enable(dwmac->clk_mem); - if (ret) { - dev_err(&pdev->dev, "mem clock enable failed\n"); - return ret; - } - - ret = clk_prepare_enable(dwmac->clk_tx); - if (ret) { - dev_err(&pdev->dev, "tx clock enable failed\n"); - goto clk_tx_en_failed; - } - if (dwmac->ops->set_intf_mode) { ret = dwmac->ops->set_intf_mode(plat_dat); if (ret) - goto intf_mode_failed; + return ret; } return 0; - -intf_mode_failed: - clk_disable_unprepare(dwmac->clk_tx); -clk_tx_en_failed: - clk_disable_unprepare(dwmac->clk_mem); - return ret; } static void imx_dwmac_exit(struct platform_device *pdev, void *priv) { - struct imx_priv_data *dwmac = priv; - - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_mem); + /* nothing to do now */ } static void imx_dwmac_fix_speed(void *priv, unsigned int speed) @@ -226,7 +231,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -249,10 +254,15 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->addr64 = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; + plat_dat->clks_config = imx_dwmac_clks_config; plat_dat->fix_mac_speed = imx_dwmac_fix_speed; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; + ret = imx_dwmac_clks_config(dwmac, true); + if (ret) + goto err_clks_config; + ret = imx_dwmac_init(pdev, dwmac); if (ret) goto err_dwmac_init; @@ -263,9 +273,11 @@ static int imx_dwmac_probe(struct platform_device *pdev) return 0; -err_dwmac_init: err_drv_probe: imx_dwmac_exit(pdev, plat_dat->bsp_priv); +err_dwmac_init: + imx_dwmac_clks_config(dwmac, false); +err_clks_config: err_parse_dt: err_match_data: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 6c19fcc76c6f..06d287f104be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ 
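The new imx_dwmac_clks_config() callback above follows the usual enable-with-rollback pattern: if the second clock fails to prepare, the first is released again, so the callback leaves the clocks balanced on both paths. A generic, hedged sketch of that pattern (the clock handles and function name are placeholders):

#include <linux/clk.h>

static int example_clks_config(struct clk *clk_a, struct clk *clk_b, bool on)
{
	int ret;

	if (!on) {
		clk_disable_unprepare(clk_b);
		clk_disable_unprepare(clk_a);
		return 0;
	}

	ret = clk_prepare_enable(clk_a);
	if (ret)
		return ret;

	ret = clk_prepare_enable(clk_b);
	if (ret)
		clk_disable_unprepare(clk_a);	/* roll back the first clock */

	return ret;
}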
-85,7 +85,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 0b64f7710d17..80728a4c0e3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -8,9 +8,28 @@ #include "dwmac-intel.h" #include "dwmac4.h" #include "stmmac.h" +#include "stmmac_ptp.h" + +#define INTEL_MGBE_ADHOC_ADDR 0x15 +#define INTEL_MGBE_XPCS_ADDR 0x16 + +/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */ +#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_256MHZ (0) +#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_200MHZ (0) + +/* Cross-timestamping defines */ +#define ART_CPUID_LEAF 0x15 +#define EHL_PSE_ART_MHZ 19200000 struct intel_priv_data { int mdio_adhoc_addr; /* mdio address for serdes & etc */ + unsigned long crossts_adj; + bool is_pse; }; /* This struct is used to associate PCI Function of MAC controller on a board, @@ -134,6 +153,11 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data) return data; } + /* PSE only - ungate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + 0, SERDES_PHY_RX_CLK); + return 0; } @@ -149,6 +173,11 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data) serdes_phy_addr = intel_priv->mdio_adhoc_addr; + /* PSE only - gate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + SERDES_PHY_RX_CLK, 0); + /* move power state to P3 */ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); @@ -201,6 +230,161 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data) } } +/* Program PTP Clock Frequency for different variant of + * Intel mGBE that has slightly different GPO mapping + */ +static void intel_mgbe_ptp_clk_freq_config(void *npriv) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)npriv; + struct intel_priv_data *intel_priv; + u32 gpio_value; + + intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv; + + gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS); + + if (intel_priv->is_pse) { + /* For PSE GbE, use 200MHz */ + gpio_value &= ~PSE_PTP_CLK_FREQ_MASK; + gpio_value |= PSE_PTP_CLK_FREQ_200MHZ; + } else { + /* For PCH GbE, use 200MHz */ + gpio_value &= ~PCH_PTP_CLK_FREQ_MASK; + gpio_value |= PCH_PTP_CLK_FREQ_200MHZ; + } + + writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS); +} + +static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, + u64 *art_time) +{ + u64 ns; + + ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0); + + *art_time = ns; +} + +static int intel_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct 
intel_priv_data *intel_priv; + + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 art_time = 0; + u64 ptp_time = 0; + u32 num_snapshot; + u32 gpio_value; + u32 acr_value; + int ret; + u32 v; + int i; + + if (!boot_cpu_has(X86_FEATURE_ART)) + return -EOPNOTSUPP; + + intel_priv = priv->plat->bsp_priv; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal + * Create a rising edge by just toggle the GPO1 to low + * and back to high. + */ + gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); + gpio_value &= ~GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + gpio_value |= GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + + /* Poll for time sync operation done */ + ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v, + (v & GMAC_INT_TSIE), 100, 10000); + + if (ret == -ETIMEDOUT) { + pr_err("%s: Wait for time sync operation timeout\n", __func__); + return ret; + } + + num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & + GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time); + *system = convert_art_to_tsc(art_time); + } + + system->cycles *= intel_priv->crossts_adj; + + return 0; +} + +static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv, + int base) +{ + if (boot_cpu_has(X86_FEATURE_ART)) { + unsigned int art_freq; + + /* On systems that support ART, ART frequency can be obtained + * from ECX register of CPUID leaf (0x15). 
+ */ + art_freq = cpuid_ecx(ART_CPUID_LEAF); + do_div(art_freq, base); + intel_priv->crossts_adj = art_freq; + } +} + static void common_default_data(struct plat_stmmacenet_data *plat) { plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ @@ -263,6 +447,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Disable Priority config by default */ plat->tx_queues_cfg[i].use_prio = false; + /* Default TX Q0 to use TSO and rest TXQ for TBS */ + if (i > 0) + plat->tx_queues_cfg[i].tbs_en = 1; } /* FIFO size is 4096 bytes for 1 tx/rx queue */ @@ -284,6 +471,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->dma_cfg->fixed_burst = 0; plat->dma_cfg->mixed_burst = 0; plat->dma_cfg->aal = 0; + plat->dma_cfg->dche = true; plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); @@ -319,6 +507,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, return ret; } + plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config; + /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; @@ -333,6 +523,30 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Use the last Rx queue */ plat->vlan_fail_q = plat->rx_queues_to_use - 1; + /* Intel mgbe SGMII interface uses pcs-xcps */ + if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) { + plat->mdio_bus_data->has_xpcs = true; + plat->mdio_bus_data->xpcs_an_inband = true; + } + + /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ + plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR; + plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; + + plat->int_snapshot_num = AUX_SNAPSHOT1; + plat->ext_snapshot_num = AUX_SNAPSHOT0; + + plat->has_crossts = true; + plat->crosststamp = intel_crosststamp; + + /* Setup MSI vector offset specific to Intel mGbE controller */ + plat->msi_mac_vec = 29; + plat->msi_lpi_vec = 28; + plat->msi_sfty_ce_vec = 27; + plat->msi_sfty_ue_vec = 26; + plat->msi_rx_base_vec = 0; + plat->msi_tx_base_vec = 1; + return 0; } @@ -378,8 +592,14 @@ static struct stmmac_pci_info ehl_rgmii1g_info = { static int ehl_pse0_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; plat->bus_id = 2; plat->addr64 = 32; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + return ehl_common_data(pdev, plat); } @@ -410,8 +630,14 @@ static struct stmmac_pci_info ehl_pse0_sgmii1g_info = { static int ehl_pse1_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; plat->bus_id = 3; plat->addr64 = 32; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + return ehl_common_data(pdev, plat); } @@ -609,6 +835,79 @@ static const struct stmmac_pci_info quark_info = { .setup = quark_default_data, }; +static int stmmac_config_single_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n", + __func__); + return ret; + } + + res->irq = pci_irq_vector(pdev, 0); + res->wol_irq = res->irq; + plat->multi_msi_en = 0; + dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", + __func__); + + return 0; +} + +static int stmmac_config_multi_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + 
int i; + + if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX || + plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) { + dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n", + __func__); + return -1; + } + + ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_info(&pdev->dev, "%s: multi MSI enablement failed\n", + __func__); + return ret; + } + + /* For RX MSI */ + for (i = 0; i < plat->rx_queues_to_use; i++) { + res->rx_irq[i] = pci_irq_vector(pdev, + plat->msi_rx_base_vec + i * 2); + } + + /* For TX MSI */ + for (i = 0; i < plat->tx_queues_to_use; i++) { + res->tx_irq[i] = pci_irq_vector(pdev, + plat->msi_tx_base_vec + i * 2); + } + + if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX) + res->irq = pci_irq_vector(pdev, plat->msi_mac_vec); + if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX) + res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec); + if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX) + res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec); + if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec); + if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); + + plat->multi_msi_en = 1; + dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); + + return 0; +} + /** * intel_eth_pci_probe * @@ -650,7 +949,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -664,20 +963,27 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); plat->bsp_priv = intel_priv; - intel_priv->mdio_adhoc_addr = 0x15; + intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR; + intel_priv->crossts_adj = 1; + + /* Initialize all MSI vectors to invalid so that it can be set + * according to platform data settings below. 
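To make the vector layout in stmmac_config_multi_msi() above concrete: with the Intel mGbE defaults assigned earlier (msi_rx_base_vec = 0, msi_tx_base_vec = 1), RX queue i lands on vector 2*i and TX queue i on vector 2*i + 1, so RX and TX vectors interleave at the bottom of the 32-vector space while the event vectors sit near the top. A small illustration of that arithmetic (not driver code):

/* Vector assignment with msi_rx_base_vec = 0 and msi_tx_base_vec = 1. */
static inline int example_rx_vec(int queue) { return 0 + queue * 2; } /* 0, 2, 4, ... */
static inline int example_tx_vec(int queue) { return 1 + queue * 2; } /* 1, 3, 5, ... */
/* Fixed event vectors set above: sfty_ue 26, sfty_ce 27, lpi 28, mac 29. */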
+ * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX) + */ + plat->msi_mac_vec = STMMAC_MSI_VEC_MAX; + plat->msi_wol_vec = STMMAC_MSI_VEC_MAX; + plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX; + plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX; + plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX; ret = info->setup(pdev, plat); if (ret) return ret; - ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); - if (ret < 0) - return ret; - memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; - res.wol_irq = pci_irq_vector(pdev, 0); - res.irq = pci_irq_vector(pdev, 0); if (plat->eee_usecs_rate > 0) { u32 tx_lpi_usec; @@ -686,13 +992,28 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); } + ret = stmmac_config_multi_msi(pdev, plat, &res); + if (ret) { + ret = stmmac_config_single_msi(pdev, plat, &res); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n", + __func__); + goto err_alloc_irq; + } + } + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { - pci_free_irq_vectors(pdev); - clk_disable_unprepare(plat->stmmac_clk); - clk_unregister_fixed_rate(plat->stmmac_clk); + goto err_dvr_probe; } + return 0; + +err_dvr_probe: + pci_free_irq_vectors(pdev); +err_alloc_irq: + clk_disable_unprepare(plat->stmmac_clk); + clk_unregister_fixed_rate(plat->stmmac_clk); return ret; } @@ -710,13 +1031,9 @@ static void intel_eth_pci_remove(struct pci_dev *pdev) stmmac_dvr_remove(&pdev->dev); - pci_free_irq_vectors(pdev); - clk_unregister_fixed_rate(priv->plat->stmmac_clk); pcim_iounmap_regions(pdev, BIT(0)); - - pci_disable_device(pdev); } static int __maybe_unused intel_eth_pci_suspend(struct device *dev) @@ -732,7 +1049,6 @@ static int __maybe_unused intel_eth_pci_suspend(struct device *dev) if (ret) return ret; - pci_disable_device(pdev); pci_wake_from_d3(pdev, true); return 0; } @@ -745,7 +1061,7 @@ static int __maybe_unused intel_eth_pci_resume(struct device *dev) pci_restore_state(pdev); pci_set_power_state(pdev, PCI_D0); - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) return ret; @@ -757,41 +1073,41 @@ static int __maybe_unused intel_eth_pci_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend, intel_eth_pci_resume); -#define PCI_DEVICE_ID_INTEL_QUARK_ID 0x0937 -#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID 0x4b30 -#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID 0x4b31 -#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID 0x4b32 +#define PCI_DEVICE_ID_INTEL_QUARK 0x0937 +#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32 /* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC * which are named PSE0 and PSE1 */ -#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID 0x4ba0 -#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID 0x4ba1 -#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID 0x4ba2 -#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0 -#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1 -#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2 -#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac -#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2 -#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac -#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID 0x7aac -#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID 0x7aad +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0 +#define 
PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2 +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2 +#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad static const struct pci_device_id intel_eth_pci_id_table[] = { - { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_phy1_info) }, - { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, {} }; MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h index e723096c0b15..542acb8ce467 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h @@ -14,6 +14,7 @@ /* SERDES defines */ #define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ +#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */ #define SERDES_RST BIT(2) /* Serdes Reset */ #define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ #define SERDES_PWR_ST_SHIFT 4 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index bf3250e0e59c..28dd0ed85a82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c 
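A note on the PCI device ID renames above: PCI_DEVICE_DATA() builds the full define name from its two tokens, which is why the #defines and the table entries have to be renamed together. The expansion is roughly as follows (reproduced from memory of <linux/pci.h> under an EXAMPLE_ prefix, so the exact upstream text is not implied):

/* PCI_DEVICE_DATA(vend, dev, data) pastes the tokens onto the usual
 * prefixes, approximately like this: */
#define EXAMPLE_PCI_DEVICE_DATA(vend, dev, data)			\
	.vendor = PCI_VENDOR_ID_##vend,					\
	.device = PCI_DEVICE_ID_##vend##_##dev,				\
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,		\
	.driver_data = (kernel_ulong_t)(data)

/* Hence { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) } resolves
 * to PCI_DEVICE_ID_INTEL_EHL_RGMII1G, matching the renamed defines that
 * drop the redundant "_ID" suffix. */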
@@ -255,7 +255,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (val) return val; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; plat_dat->multicast_filter_bins = 0; + plat_dat->tx_fifo_size = 8192; + plat_dat->rx_fifo_size = 8192; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 3d3f43d91b98..9d77c647badd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 9e4b83832938..58c0feaa8131 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -407,7 +407,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index bbc16b5a410a..16fb66a0ca72 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -52,7 +52,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 848e5c37746b..c7a6588d9398 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -398,7 +398,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c index 8551ea878ba5..adfeb8d3293d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c @@ -118,7 +118,7 @@ static int oxnas_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index bfc4a92f1d92..84382fc5cc4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -461,7 +461,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); @@ -477,7 +477,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii"); ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ethqos->rgmii_base)) { - dev_err(&pdev->dev, "Can't get rgmii base\n"); ret = PTR_ERR(ethqos->rgmii_base); goto err_mem; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 6ef30252bfe0..8d28a536e1bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1396,7 +1396,7 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 70d41783329d..85208128f135 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -398,7 +398,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index e1b63df6f96f..710d7435733e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -325,7 +325,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 5d4df4c5254e..2b38a499a404 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -371,7 +371,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index e62efd166ec8..4422baeed3d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -239,6 +239,22 @@ static const struct emac_variant emac_variant_h6 = { #define EMAC_RX_EARLY_INT BIT(13) #define EMAC_RGMII_STA_INT BIT(16) +#define EMAC_INT_MSK_COMMON EMAC_RGMII_STA_INT +#define EMAC_INT_MSK_TX (EMAC_TX_INT | \ + EMAC_TX_DMA_STOP_INT | \ + EMAC_TX_BUF_UA_INT | \ + EMAC_TX_TIMEOUT_INT | \ + EMAC_TX_UNDERFLOW_INT | \ + EMAC_TX_EARLY_INT |\ + EMAC_INT_MSK_COMMON) +#define EMAC_INT_MSK_RX (EMAC_RX_INT | \ + EMAC_RX_BUF_UA_INT 
| \ + EMAC_RX_DMA_STOP_INT | \ + EMAC_RX_TIMEOUT_INT | \ + EMAC_RX_OVERFLOW_INT | \ + EMAC_RX_EARLY_INT | \ + EMAC_INT_MSK_COMMON) + #define MAC_ADDR_TYPE_DST BIT(31) /* H3 specific bits for EPHY */ @@ -412,13 +428,19 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) } static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, + u32 dir) { u32 v; int ret = 0; v = readl(ioaddr + EMAC_INT_STA); + if (dir == DMA_DIR_RX) + v &= EMAC_INT_MSK_RX; + else if (dir == DMA_DIR_TX) + v &= EMAC_INT_MSK_TX; + if (v & EMAC_TX_INT) { ret |= handle_tx; x->tx_normal_irq_n++; @@ -1199,7 +1221,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) if (ret) return -EINVAL; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 0e1ca2cba3c7..527077c98ebc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -108,7 +108,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index d23be45a64e5..d046e33b8a29 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -208,7 +208,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 2bac49b49f73..90383abafa66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -255,7 +255,7 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr, } static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, - u32 number_chan) + u32 queue) { writel(riwt, ioaddr + DMA_RX_WATCHDOG); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 82df91c130f7..462ca7ed095a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -42,6 +42,7 @@ #define GMAC_HW_FEATURE3 0x00000128 #define GMAC_MDIO_ADDR 0x00000200 #define GMAC_MDIO_DATA 0x00000204 +#define GMAC_GPIO_STATUS 0x0000020C #define GMAC_ARP_ADDR 0x00000210 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) @@ -49,6 +50,7 @@ #define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30) #define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30) #define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30) +#define GMAC_TIMESTAMP_STATUS 0x00000b20 /* RX Queues Routing */ #define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) @@ -143,6 +145,7 @@ #define GMAC_INT_PCS_PHYIS BIT(3) #define GMAC_INT_PMT_EN BIT(4) #define GMAC_INT_LPI_EN BIT(5) +#define GMAC_INT_TSIE BIT(12) #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ 
GMAC_INT_PCS_ANE) @@ -259,6 +262,7 @@ enum power_event { #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28) #define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) @@ -278,6 +282,12 @@ enum power_event { #define GMAC_HW_FEAT_DVLAN BIT(5) #define GMAC_HW_FEAT_NRVF GENMASK(2, 0) +/* GMAC GPIO Status reg */ +#define GMAC_GPO0 BIT(16) +#define GMAC_GPO1 BIT(17) +#define GMAC_GPO2 BIT(18) +#define GMAC_GPO3 BIT(19) + /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) #define GMAC_HI_DCS_SHIFT 16 @@ -298,6 +308,11 @@ enum power_event { #define GMAC_L4DP0_SHIFT 16 #define GMAC_L4SP0 GENMASK(15, 0) +/* MAC Timestamp Status */ +#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) +#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define GMAC_TIMESTAMP_ATSNS_SHIFT 25 + /* MTL registers */ #define MTL_OPERATION_MODE 0x00000c00 #define MTL_FRPE BIT(15) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 29f765a246a0..95864f014ffa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -53,6 +53,10 @@ static void dwmac4_core_init(struct mac_device_info *hw, if (hw->pcs) value |= GMAC_PCS_IRQ_DEFAULT; + /* Enable FPE interrupt */ + if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26) + value |= GMAC_INT_FPE_EN; + writel(value, ioaddr + GMAC_INT_EN); } @@ -1245,6 +1249,8 @@ const struct stmmac_ops dwmac410_ops = { .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, @@ -1294,6 +1300,8 @@ const struct stmmac_ops dwmac510_ops = { .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 62aa0e95beb7..a602d16b9e53 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -161,6 +161,19 @@ static void dwmac4_dma_init(void __iomem *ioaddr, value |= DMA_SYS_BUS_EAME; writel(value, ioaddr + DMA_SYS_BUS_MODE); + + value = readl(ioaddr + DMA_BUS_MODE); + + if (dma_cfg->multi_msi_en) { + value &= ~DMA_BUS_MODE_INTM_MASK; + value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT); + } + + if (dma_cfg->dche) + value |= DMA_BUS_MODE_DCHE; + + writel(value, ioaddr + DMA_BUS_MODE); + } static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, @@ -210,12 +223,9 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) _dwmac4_dump_dma_regs(ioaddr, i, reg_space); } -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) { - u32 chan; - - for (chan = 0; chan < number_chan; chan++) - writel(riwt, 
ioaddr + DMA_CHAN_RX_WATCHDOG(chan)); + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); } static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, @@ -415,6 +425,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; + /* Number of Auxiliary Snapshot Inputs */ + dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28; /* MAC HW feature3 */ hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 8391ca63d943..9321879b599c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -25,6 +25,10 @@ #define DMA_TBS_CTRL 0x00001050 /* DMA Bus Mode bitmap */ +#define DMA_BUS_MODE_DCHE BIT(19) +#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16) +#define DMA_BUS_MODE_INTM_SHIFT 16 +#define DMA_BUS_MODE_INTM_MODE1 0x1 #define DMA_BUS_MODE_SFT_RESET BIT(0) /* DMA SYS Bus Mode bitmap */ @@ -149,6 +153,25 @@ #define DMA_CHAN_STATUS_TPS BIT(1) #define DMA_CHAN_STATUS_TI BIT(0) +#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \ + DMA_CHAN_STATUS_AIS | \ + DMA_CHAN_STATUS_CDE | \ + DMA_CHAN_STATUS_FBE) + +#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \ + DMA_CHAN_STATUS_ERI | \ + DMA_CHAN_STATUS_RWT | \ + DMA_CHAN_STATUS_RPS | \ + DMA_CHAN_STATUS_RBU | \ + DMA_CHAN_STATUS_RI | \ + DMA_CHAN_STATUS_MSK_COMMON) + +#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \ + DMA_CHAN_STATUS_TBU | \ + DMA_CHAN_STATUS_TPS | \ + DMA_CHAN_STATUS_TI | \ + DMA_CHAN_STATUS_MSK_COMMON) + /* Interrupt enable bits per channel */ #define DMA_CHAN_INTR_ENA_NIE BIT(16) #define DMA_CHAN_INTR_ENA_AIE BIT(15) @@ -206,7 +229,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); int dwmac4_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan); + struct stmmac_extra_stats *x, u32 chan, u32 dir); void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 71e50751ef2d..e63270267578 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -135,12 +135,17 @@ void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) } int dwmac4_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, u32 dir) { u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); int ret = 0; + if (dir == DMA_DIR_RX) + intr_status &= DMA_CHAN_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_CHAN_STATUS_MSK_TX; + /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) @@ -161,20 +166,19 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, } } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { + if (likely(intr_status & DMA_CHAN_STATUS_NIS)) x->normal_irq_n++; - if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - x->rx_normal_irq_n++; - ret |= handle_rx; - } - if (likely(intr_status & 
(DMA_CHAN_STATUS_TI | - DMA_CHAN_STATUS_TBU))) { - x->tx_normal_irq_n++; - ret |= handle_tx; - } - if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) - x->rx_early_irq++; + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + if (likely(intr_status & (DMA_CHAN_STATUS_TI | + DMA_CHAN_STATUS_TBU))) { + x->tx_normal_irq_n++; + ret |= handle_tx; } + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) + x->rx_early_irq++; writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 8f7ac24545ef..d8c6ff725237 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -192,6 +192,7 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp) /* 1. Enable Safety Features */ value = readl(ioaddr + MTL_ECC_CONTROL); + value |= MEEAO; /* MTL ECC Error Addr Status Override */ value |= TSOEE; /* TSO ECC */ value |= MRXPEE; /* MTL RX Parser ECC */ value |= MESTEE; /* MTL EST ECC */ @@ -595,9 +596,95 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, ctrl &= ~EEST; writel(ctrl, ioaddr + MTL_EST_CONTROL); + + /* Configure EST interrupt */ + if (cfg->enable) + ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC); + else + ctrl = 0; + + writel(ctrl, ioaddr + MTL_EST_INT_EN); + return 0; } +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt) +{ + u32 status, value, feqn, hbfq, hbfs, btrl; + u32 txqcnt_mask = (1 << txqcnt) - 1; + + status = readl(ioaddr + MTL_EST_STATUS); + + value = (CGCE | HLBS | HLBF | BTRE | SWLC); + + /* Return if there is no error */ + if (!(status & value)) + return; + + if (status & CGCE) { + /* Clear Interrupt */ + writel(CGCE, ioaddr + MTL_EST_STATUS); + + x->mtl_est_cgce++; + } + + if (status & HLBS) { + value = readl(ioaddr + MTL_EST_SCH_ERR); + value &= txqcnt_mask; + + x->mtl_est_hlbs++; + + /* Clear Interrupt */ + writel(value, ioaddr + MTL_EST_SCH_ERR); + + /* Collecting info to shows all the queues that has HLBS + * issue. 
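A recurring change in this part of the series is the extra dir argument to the dma_interrupt helpers (dwmac4 above, dwmac100/dwmac1000 and dwxgmac2 below): with one MSI vector per RX or TX queue, a handler must ignore status bits belonging to the other direction, so the status word is masked with DMA_CHAN_STATUS_MSK_RX or DMA_CHAN_STATUS_MSK_TX before being decoded. A hedged sketch of how a per-RX-queue vector might use it; the context structure and handler below are placeholders, not the driver's real per-queue IRQ code:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Placeholder per-queue context; the real driver keeps the equivalent
 * state in its private structures. */
struct example_rx_queue {
	void __iomem *ioaddr;
	struct stmmac_extra_stats *stats;
	u32 chan;
	struct napi_struct napi;
};

static irqreturn_t example_rx_msi_handler(int irq, void *data)
{
	struct example_rx_queue *rxq = data;

	/* DMA_DIR_RX makes the helper ignore TX bits for this channel, so a
	 * TX completion is left to the paired TX vector. */
	if (dwmac4_dma_interrupt(rxq->ioaddr, rxq->stats, rxq->chan,
				 DMA_DIR_RX) & handle_rx)
		napi_schedule(&rxq->napi);

	return IRQ_HANDLED;
}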
The only way to clear this is to clear the + * statistic + */ + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value); + } + + if (status & HLBF) { + value = readl(ioaddr + MTL_EST_FRM_SZ_ERR); + feqn = value & txqcnt_mask; + + value = readl(ioaddr + MTL_EST_FRM_SZ_CAP); + hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT; + hbfs = value & SZ_CAP_HBFS_MASK; + + x->mtl_est_hlbf++; + + /* Clear Interrupt */ + writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR); + + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n", + hbfq, hbfs); + } + + if (status & BTRE) { + if ((status & BTRL) == BTRL_MAX) + x->mtl_est_btrlm++; + else + x->mtl_est_btre++; + + btrl = (status & BTRL) >> BTRL_SHIFT; + + if (net_ratelimit()) + netdev_info(dev, "EST: BTR Error Loop Count %u\n", + btrl); + + writel(BTRE, ioaddr + MTL_EST_STATUS); + } + + if (status & SWLC) { + writel(SWLC, ioaddr + MTL_EST_STATUS); + netdev_info(dev, "EST: SWOL has been switched\n"); + } +} + void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable) { @@ -621,3 +708,52 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, value |= EFPE; writel(value, ioaddr + MAC_FPE_CTRL_STS); } + +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) +{ + u32 value; + int status; + + status = FPE_EVENT_UNKNOWN; + + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (value & TRSP) { + status |= FPE_EVENT_TRSP; + netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); + } + + if (value & TVER) { + status |= FPE_EVENT_TVER; + netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); + } + + if (value & RRSP) { + status |= FPE_EVENT_RRSP; + netdev_info(dev, "FPE: Respond mPacket is received\n"); + } + + if (value & RVER) { + status |= FPE_EVENT_RVER; + netdev_info(dev, "FPE: Verify mPacket is received\n"); + } + + return status; +} + +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type) +{ + u32 value; + + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (type == MPACKET_VERIFY) { + value &= ~SRSP; + value |= SVER; + } else { + value &= ~SVER; + value |= SRSP; + } + + writel(value, ioaddr + MAC_FPE_CTRL_STS); +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index 56b0762c1276..6b2fd37b29ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -12,6 +12,12 @@ #define TMOUTEN BIT(0) #define MAC_FPE_CTRL_STS 0x00000234 +#define TRSP BIT(19) +#define TVER BIT(18) +#define RRSP BIT(17) +#define RVER BIT(16) +#define SRSP BIT(2) +#define SVER BIT(1) #define EFPE BIT(0) #define MAC_PPS_CONTROL 0x00000b70 @@ -38,6 +44,36 @@ #define PTOV_SHIFT 24 #define SSWL BIT(1) #define EEST BIT(0) + +#define MTL_EST_STATUS 0x00000c58 +#define BTRL GENMASK(11, 8) +#define BTRL_SHIFT 8 +#define BTRL_MAX (0xF << BTRL_SHIFT) +#define SWOL BIT(7) +#define SWOL_SHIFT 7 +#define CGCE BIT(4) +#define HLBS BIT(3) +#define HLBF BIT(2) +#define BTRE BIT(1) +#define SWLC BIT(0) + +#define MTL_EST_SCH_ERR 0x00000c60 +#define MTL_EST_FRM_SZ_ERR 0x00000c64 +#define MTL_EST_FRM_SZ_CAP 0x00000c68 +#define SZ_CAP_HBFS_MASK GENMASK(14, 0) +#define SZ_CAP_HBFQ_SHIFT 16 +#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \ + ((val) > 4 ? GENMASK(18, 16) : \ + (val) > 2 ? 
GENMASK(17, 16) : \ + BIT(16)); }) + +#define MTL_EST_INT_EN 0x00000c70 +#define IECGCE CGCE +#define IEHS HLBS +#define IEHF HLBF +#define IEBE BTRE +#define IECC SWLC + #define MTL_EST_GCL_CONTROL 0x00000c80 #define BTR_LOW 0x0 #define BTR_HIGH 0x1 @@ -62,6 +98,7 @@ #define ADDR GENMASK(15, 0) #define MTL_RXP_IACC_DATA 0x00000cb4 #define MTL_ECC_CONTROL 0x00000cc0 +#define MEEAO BIT(8) #define TSOEE BIT(4) #define MRXPEE BIT(3) #define MESTEE BIT(2) @@ -98,6 +135,8 @@ #define GMAC_RXQCTRL_VFFQ_SHIFT 17 #define GMAC_RXQCTRL_VFFQE BIT(16) +#define GMAC_INT_FPE_EN BIT(17) + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, @@ -111,7 +150,12 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, u32 sub_second_inc, u32 systime_flags); int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, unsigned int ptp_rate); +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable); +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + enum stmmac_mpacket_type type); +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e5dbd0bc257e..1914ad698cab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -128,6 +128,26 @@ #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ +#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \ + DMA_STATUS_AIS | \ + DMA_STATUS_FBI) + +#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \ + DMA_STATUS_RWT | \ + DMA_STATUS_RPS | \ + DMA_STATUS_RU | \ + DMA_STATUS_RI | \ + DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON) + +#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \ + DMA_STATUS_UNF | \ + DMA_STATUS_TJT | \ + DMA_STATUS_TU | \ + DMA_STATUS_TPS | \ + DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON) + #define NUM_DWMAC100_DMA_REGS 9 #define NUM_DWMAC1000_DMA_REGS 23 @@ -139,7 +159,7 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, - u32 chan); + u32 chan, u32 dir); int dwmac_dma_reset(void __iomem *ioaddr); #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 57a53a600aa5..d1c31200bb91 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -155,7 +155,7 @@ static void show_rx_process_state(unsigned int status) #endif int dwmac_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, u32 dir) { int ret = 0; /* read the status register (CSR5) */ @@ -167,6 +167,12 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, show_tx_process_state(intr_status); show_rx_process_state(intr_status); #endif + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX; + /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_STATUS_AIS)) { if (unlikely(intr_status & 
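The SZ_CAP_HBFQ_MASK() helper defined above widens the queue field of MTL_EST_FRM_SZ_CAP with the number of TX queues. A minimal stand-alone sketch of that selection, with BIT() and GENMASK() redefined locally purely for illustration and the queue counts picked as examples:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel helpers, for illustration only */
#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* Mirrors SZ_CAP_HBFQ_MASK(): a wider HBFQ field when more TX queues exist */
static uint32_t hbfq_mask(uint32_t txqcnt)
{
	return txqcnt > 4 ? GENMASK(18, 16) :
	       txqcnt > 2 ? GENMASK(17, 16) :
			    BIT(16);
}

int main(void)
{
	printf("txqcnt=2 -> 0x%05x\n", hbfq_mask(2));	/* 0x10000 */
	printf("txqcnt=4 -> 0x%05x\n", hbfq_mask(4));	/* 0x30000 */
	printf("txqcnt=8 -> 0x%05x\n", hbfq_mask(8));	/* 0x70000 */
	return 0;
}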
DMA_STATUS_UNF)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 6c3b8a950f58..1913385df685 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -412,6 +412,12 @@ #define XGMAC_TI BIT(0) #define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4) +#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE) +#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \ + XGMAC_DMA_STATUS_MSK_COMMON) +#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \ + XGMAC_DMA_STATUS_MSK_COMMON) + /* Descriptors */ #define XGMAC_TDES0_LTV BIT(31) #define XGMAC_TDES0_LT GENMASK(7, 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 77308c5c5d29..906e985441a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -323,12 +323,18 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan) } static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, + u32 dir) { u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; + if (dir == DMA_DIR_RX) + intr_status &= XGMAC_DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= XGMAC_DMA_STATUS_MSK_TX; + /* ABNORMAL interrupts */ if (unlikely(intr_status & XGMAC_AIS)) { if (unlikely(intr_status & XGMAC_RBU)) { @@ -441,12 +447,9 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; } -static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan) +static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) { - u32 i; - - for (i = 0; i < nchan; i++) - writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i)); + writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue)); } static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 979ac9fca23c..2cc91759b91f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -201,12 +201,12 @@ struct stmmac_dma_ops { void (*start_rx)(void __iomem *ioaddr, u32 chan); void (*stop_rx)(void __iomem *ioaddr, u32 chan); int (*dma_interrupt) (void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan); + struct stmmac_extra_stats *x, u32 chan, u32 dir); /* If supported then get the optional core features */ void (*get_hw_feature)(void __iomem *ioaddr, struct dma_features *dma_cap); /* Program the HW RX Watchdog */ - void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue); void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); @@ -280,7 +280,6 @@ struct stmmac_dma_ops { struct mac_device_info; struct net_device; struct rgmii_adv; -struct stmmac_safety_stats; struct stmmac_tc_entry; struct stmmac_pps_cfg; struct stmmac_rss; @@ -393,8 +392,13 @@ struct stmmac_ops { void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); int (*est_configure)(void __iomem *ioaddr, 
struct stmmac_est *cfg, unsigned int ptp_rate); + void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable); + void (*fpe_send_mpacket)(void __iomem *ioaddr, + enum stmmac_mpacket_type type); + int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); }; #define stmmac_core_init(__priv, __args...) \ @@ -491,8 +495,16 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) #define stmmac_est_configure(__priv, __args...) \ stmmac_do_callback(__priv, mac, est_configure, __args) +#define stmmac_est_irq_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) #define stmmac_fpe_configure(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, fpe_configure, __args) +#define stmmac_fpe_send_mpacket(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args) +#define stmmac_fpe_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_irq_status, __args) + +struct stmmac_priv; /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { @@ -504,6 +516,8 @@ struct stmmac_hwtimestamp { int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, int gmac4); void (*get_systime) (void __iomem *ioaddr, u64 *systime); + void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + void (*timestamp_interrupt)(struct stmmac_priv *priv); }; #define stmmac_config_hw_tstamping(__priv, __args...) \ @@ -518,6 +532,10 @@ struct stmmac_hwtimestamp { stmmac_do_callback(__priv, ptp, adjust_systime, __args) #define stmmac_get_systime(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, get_systime, __args) +#define stmmac_get_ptptime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) +#define stmmac_timestamp_interrupt(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) /* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e553b9a1f785..b6cd43eda7ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -26,10 +26,21 @@ struct stmmac_resources { void __iomem *addr; - const char *mac; + u8 mac[ETH_ALEN]; int wol_irq; int lpi_irq; int irq; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; +}; + +enum stmmac_txbuf_type { + STMMAC_TXBUF_T_SKB, + STMMAC_TXBUF_T_XDP_TX, + STMMAC_TXBUF_T_XDP_NDO, + STMMAC_TXBUF_T_XSK_TX, }; struct stmmac_tx_info { @@ -38,6 +49,7 @@ struct stmmac_tx_info { unsigned len; bool last_segment; bool is_jumbo; + enum stmmac_txbuf_type buf_type; }; #define STMMAC_TBS_AVAIL BIT(0) @@ -53,8 +65,13 @@ struct stmmac_tx_queue { struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; struct dma_edesc *dma_entx; struct dma_desc *dma_tx; - struct sk_buff **tx_skbuff; + union { + struct sk_buff **tx_skbuff; + struct xdp_frame **xdpf; + }; struct stmmac_tx_info *tx_skbuff_dma; + struct xsk_buff_pool *xsk_pool; + u32 xsk_frames_done; unsigned int cur_tx; unsigned int dirty_tx; dma_addr_t dma_tx_phy; @@ -63,15 +80,23 @@ struct stmmac_tx_queue { }; struct stmmac_rx_buffer { - struct page *page; + union { + struct { + struct page *page; + dma_addr_t addr; + __u32 page_offset; + }; + struct xdp_buff *xdp; + }; struct page *sec_page; - dma_addr_t addr; dma_addr_t sec_addr; }; struct stmmac_rx_queue { u32 rx_count_frames; u32 queue_index; + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; struct page_pool *page_pool; struct stmmac_rx_buffer *buf_pool; struct stmmac_priv *priv_data; @@ -79,6 +104,7 @@ struct stmmac_rx_queue { struct dma_desc *dma_rx ____cacheline_aligned_in_smp; unsigned int cur_rx; unsigned int dirty_rx; + unsigned int buf_alloc_num; u32 rx_zeroc_thresh; dma_addr_t dma_rx_phy; u32 rx_tail_addr; @@ -93,6 +119,7 @@ struct stmmac_rx_queue { struct stmmac_channel { struct napi_struct rx_napi ____cacheline_aligned_in_smp; struct napi_struct tx_napi ____cacheline_aligned_in_smp; + struct napi_struct rxtx_napi ____cacheline_aligned_in_smp; struct stmmac_priv *priv_data; spinlock_t lock; u32 index; @@ -147,20 +174,21 @@ struct stmmac_flow_entry { struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ - u32 tx_coal_frames; - u32 tx_coal_timer; - u32 rx_coal_frames; + u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; + u32 tx_coal_timer[MTL_MAX_TX_QUEUES]; + u32 rx_coal_frames[MTL_MAX_TX_QUEUES]; int tx_coalesce; int hwts_tx_en; bool tx_path_in_lpi_mode; bool tso; int sph; + int sph_cap; u32 sarc_type; unsigned int dma_buf_sz; unsigned int rx_copybreak; - u32 rx_riwt; + u32 rx_riwt[MTL_MAX_TX_QUEUES]; int hwts_rx_en; void __iomem *ioaddr; @@ -222,9 +250,24 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + /* Protects auxiliary snapshot registers from concurrent access. 
*/ + struct mutex aux_ts_lock; + void __iomem *mmcaddr; void __iomem *ptpaddr; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; + /*irq name */ + char int_name_mac[IFNAMSIZ + 9]; + char int_name_wol[IFNAMSIZ + 9]; + char int_name_lpi[IFNAMSIZ + 9]; + char int_name_sfty_ce[IFNAMSIZ + 10]; + char int_name_sfty_ue[IFNAMSIZ + 10]; + char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14]; + char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18]; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; @@ -234,6 +277,12 @@ struct stmmac_priv { struct workqueue_struct *wq; struct work_struct service_task; + /* Workqueue for handling FPE hand-shaking */ + unsigned long fpe_task_state; + struct workqueue_struct *fpe_wq; + struct work_struct fpe_task; + char wq_name[IFNAMSIZ + 4]; + /* TC Handling */ unsigned int tc_entries_max; unsigned int tc_off_max; @@ -246,6 +295,10 @@ struct stmmac_priv { /* Receive Side Scaling */ struct stmmac_rss rss; + + /* XDP BPF Program */ + unsigned long *af_xdp_zc_qps; + struct bpf_prog *xdp_prog; }; enum stmmac_state { @@ -262,6 +315,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); +int stmmac_open(struct net_device *dev); +int stmmac_release(struct net_device *dev); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); int stmmac_dvr_remove(struct device *dev); @@ -272,6 +327,27 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable); + +static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) +{ + return !!priv->xdp_prog; +} + +static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) +{ + if (stmmac_xdp_is_enabled(priv)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index c5642985ef95..61b11639ee0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -158,6 +158,12 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { /* TSO */ STMMAC_STAT(tx_tso_frames), STMMAC_STAT(tx_tso_nfrags), + /* EST */ + STMMAC_STAT(mtl_est_cgce), + STMMAC_STAT(mtl_est_hlbs), + STMMAC_STAT(mtl_est_hlbf), + STMMAC_STAT(mtl_est_btre), + STMMAC_STAT(mtl_est_btrlm), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -756,28 +762,75 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) return (riwt * 256) / (clk / 1000000); } -static int stmmac_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) +static int 
__stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) { struct stmmac_priv *priv = netdev_priv(dev); + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; - ec->tx_coalesce_usecs = priv->tx_coal_timer; - ec->tx_max_coalesced_frames = priv->tx_coal_frames; + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); - if (priv->use_riwt) { - ec->rx_max_coalesced_frames = priv->rx_coal_frames; - ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv); + if (queue < 0) + queue = 0; + else if (queue >= max_cnt) + return -EINVAL; + + if (queue < tx_cnt) { + ec->tx_coalesce_usecs = priv->tx_coal_timer[queue]; + ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue]; + } else { + ec->tx_coalesce_usecs = 0; + ec->tx_max_coalesced_frames = 0; + } + + if (priv->use_riwt && queue < rx_cnt) { + ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue]; + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue], + priv); + } else { + ec->rx_max_coalesced_frames = 0; + ec->rx_coalesce_usecs = 0; } return 0; } -static int stmmac_set_coalesce(struct net_device *dev, +static int stmmac_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { + return __stmmac_get_coalesce(dev, ec, -1); +} + +static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_get_coalesce(dev, ec, queue); +} + +static int __stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_cnt = priv->plat->rx_queues_to_use; + bool all_queues = false; unsigned int rx_riwt; + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + all_queues = true; + else if (queue >= max_cnt) + return -EINVAL; if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); @@ -785,8 +838,23 @@ static int stmmac_set_coalesce(struct net_device *dev, if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) return -EINVAL; - priv->rx_riwt = rx_riwt; - stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + if (all_queues) { + int i; + + for (i = 0; i < rx_cnt; i++) { + priv->rx_riwt[i] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, i); + priv->rx_coal_frames[i] = + ec->rx_max_coalesced_frames; + } + } else if (queue < rx_cnt) { + priv->rx_riwt[queue] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, queue); + priv->rx_coal_frames[queue] = + ec->rx_max_coalesced_frames; + } } if ((ec->tx_coalesce_usecs == 0) && @@ -797,13 +865,37 @@ static int stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) return -EINVAL; - /* Only copy relevant parameters, ignore all others. 
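The per-queue coalesce paths above program the RX watchdog from priv->rx_riwt[queue] and report it back through the stmmac_riwt2usec() conversion shown at the top of this hunk. A minimal stand-alone sketch of that conversion; the 250 MHz clock is an assumed example rate, not a value taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Mirrors stmmac_riwt2usec(): usecs = (riwt * 256) / (clk / 1000000) */
static uint32_t riwt2usec(uint32_t riwt, uint32_t clk_hz)
{
	return (riwt * 256) / (clk_hz / 1000000);
}

int main(void)
{
	uint32_t clk_hz = 250000000;	/* assumed CSR clock of 250 MHz */

	/* Maximum 8-bit watchdog value is roughly 261 us at 250 MHz */
	printf("riwt=0xff -> %u us\n", riwt2usec(0xff, clk_hz));
	return 0;
}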
*/ - priv->tx_coal_frames = ec->tx_max_coalesced_frames; - priv->tx_coal_timer = ec->tx_coalesce_usecs; - priv->rx_coal_frames = ec->rx_max_coalesced_frames; + if (all_queues) { + int i; + + for (i = 0; i < tx_cnt; i++) { + priv->tx_coal_frames[i] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[i] = + ec->tx_coalesce_usecs; + } + } else if (queue < tx_cnt) { + priv->tx_coal_frames[queue] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[queue] = + ec->tx_coalesce_usecs; + } + return 0; } +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, -1); +} + +static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, queue); +} + static int stmmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -1001,6 +1093,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, + .get_per_queue_coalesce = stmmac_get_per_queue_coalesce, + .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, .get_channels = stmmac_get_channels, .set_channels = stmmac_set_channels, .get_tunable = stmmac_get_tunable, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index d291612eeafb..074e2cdfb0fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -12,8 +12,11 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/delay.h> +#include <linux/ptp_clock_kernel.h> #include "common.h" #include "stmmac_ptp.h" +#include "dwmac4.h" +#include "stmmac.h" static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { @@ -153,6 +156,51 @@ static void get_systime(void __iomem *ioaddr, u64 *systime) *systime = ns; } +static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + PTP_ATNR); + ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + +static void timestamp_interrupt(struct stmmac_priv *priv) +{ + u32 num_snapshot, ts_status, tsync_int; + struct ptp_clock_event event; + unsigned long flags; + u64 ptp_time; + int i; + + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; + + if (!tsync_int) + return; + + /* Read timestamp status to clear interrupt from either external + * timestamp or start/end of PPS. 
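get_ptptime() above composes one 64-bit nanosecond value from the two auxiliary snapshot registers, nanoseconds first and then seconds scaled by NSEC_PER_SEC, before timestamp_interrupt() forwards it as a PTP_CLOCK_EXTTS event. A minimal stand-alone sketch of that composition; the register contents are invented example values:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors get_ptptime(): ns = ATNR + ATSR * NSEC_PER_SEC */
static uint64_t compose_ptp_time(uint32_t atsr_sec, uint32_t atnr_nsec)
{
	uint64_t ns = atnr_nsec;

	ns += (uint64_t)atsr_sec * NSEC_PER_SEC;
	return ns;
}

int main(void)
{
	/* Example snapshot of 5 s and 500000000 ns */
	printf("%llu ns\n",
	       (unsigned long long)compose_ptp_time(5, 500000000));	/* 5500000000 */
	return 0;
}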
+ */ + ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); + + if (!priv->plat->ext_snapshot_en) + return; + + num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + get_ptptime(priv->ptpaddr, &ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + const struct stmmac_hwtimestamp stmmac_ptp = { .config_hw_tstamping = config_hw_tstamping, .init_systime = init_systime, @@ -160,4 +208,6 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .config_addend = config_addend, .adjust_systime = adjust_systime, .get_systime = get_systime, + .get_ptptime = get_ptptime, + .timestamp_interrupt = timestamp_interrupt, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4749bd0af160..a9a984c57d78 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,6 +28,7 @@ #include <linux/if_vlan.h> #include <linux/dma-mapping.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> #include <linux/prefetch.h> #include <linux/pinctrl/consumer.h> #ifdef CONFIG_DEBUG_FS @@ -37,9 +38,12 @@ #include <linux/net_tstamp.h> #include <linux/phylink.h> #include <linux/udp.h> +#include <linux/bpf_trace.h> #include <net/pkt_cls.h> +#include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" #include "stmmac.h" +#include "stmmac_xdp.h" #include <linux/reset.h> #include <linux/of_mdio.h> #include "dwmac1000.h" @@ -66,6 +70,16 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) +/* Limit to make sure XDP TX and slow path can coexist */ +#define STMMAC_XSK_TX_BUDGET_MAX 256 +#define STMMAC_TX_XSK_AVAIL 16 +#define STMMAC_RX_FILL_BATCH 16 + +#define STMMAC_XDP_PASS 0 +#define STMMAC_XDP_CONSUMED BIT(0) +#define STMMAC_XDP_TX BIT(1) +#define STMMAC_XDP_REDIRECT BIT(2) + static int flow_ctrl = FLOW_AUTO; module_param(flow_ctrl, int, 0644); MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); @@ -104,6 +118,13 @@ module_param(chain_mode, int, 0444); MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +/* For MSI interrupts handling */ +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; @@ -113,6 +134,38 @@ static void stmmac_exit_fs(struct net_device *dev); #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) +{ + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(priv->plat->stmmac_clk); + if (ret) + return ret; + ret = clk_prepare_enable(priv->plat->pclk); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + return ret; + } + if (priv->plat->clks_config) { + ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); + if (ret) { + 
clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + return ret; + } + } + } else { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + if (priv->plat->clks_config) + priv->plat->clks_config(priv->plat->bsp_priv, enabled); + } + + return ret; +} +EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); + /** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of @@ -134,11 +187,7 @@ static void stmmac_verify_args(void) eee_timer = STMMAC_DEFAULT_LPI_TIMER; } -/** - * stmmac_disable_all_queues - Disable all queues - * @priv: driver private structure - */ -static void stmmac_disable_all_queues(struct stmmac_priv *priv) +static void __stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; u32 tx_queues_cnt = priv->plat->tx_queues_to_use; @@ -148,6 +197,12 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_disable(&ch->rxtx_napi); + continue; + } + if (queue < rx_queues_cnt) napi_disable(&ch->rx_napi); if (queue < tx_queues_cnt) @@ -156,6 +211,28 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) } /** + * stmmac_disable_all_queues - Disable all queues + * @priv: driver private structure + */ +static void stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + struct stmmac_rx_queue *rx_q; + u32 queue; + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + for (queue = 0; queue < rx_queues_cnt; queue++) { + rx_q = &priv->rx_queue[queue]; + if (rx_q->xsk_pool) { + synchronize_rcu(); + break; + } + } + + __stmmac_disable_all_queues(priv); +} + +/** * stmmac_enable_all_queues - Enable all queues * @priv: driver private structure */ @@ -169,6 +246,12 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_enable(&ch->rxtx_napi); + continue; + } + if (queue < rx_queues_cnt) napi_enable(&ch->rx_napi); if (queue < tx_queues_cnt) @@ -433,6 +516,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, { struct skb_shared_hwtstamps shhwtstamp; bool found = false; + s64 adjust = 0; u64 ns = 0; if (!priv->hwts_tx_en) @@ -451,6 +535,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, } if (found) { + /* Correct the clk domain crossing(CDC) error */ + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { + adjust += -(2 * (NSEC_PER_SEC / + priv->plat->clk_ptp_rate)); + ns += adjust; + } + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -474,6 +565,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; + u64 adjust = 0; u64 ns = 0; if (!priv->hwts_rx_en) @@ -485,6 +577,13 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, /* Check if timestamp is available */ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); + + /* Correct the clk domain crossing(CDC) error */ + if 
(priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { + adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); + ns -= adjust; + } + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); @@ -922,6 +1021,21 @@ static void stmmac_mac_an_restart(struct phylink_config *config) /* Not Supported */ } +static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (is_up && *hs_enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); + } else { + *lo_state = FPE_EVENT_UNKNOWN; + *lp_state = FPE_EVENT_UNKNOWN; + } +} + static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -932,6 +1046,9 @@ static void stmmac_mac_link_down(struct phylink_config *config, priv->tx_lpi_enabled = false; stmmac_eee_init(priv); stmmac_set_eee_pls(priv, priv->hw, false); + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, false); } static void stmmac_mac_link_up(struct phylink_config *config, @@ -1030,6 +1147,9 @@ static void stmmac_mac_link_up(struct phylink_config *config, priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); } + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, true); } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { @@ -1117,6 +1237,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; priv->phylink_config.pcs_poll = true; + priv->phylink_config.ovr_an_inband = + priv->plat->mdio_bus_data->xpcs_an_inband; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1304,11 +1426,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->page) - return -ENOMEM; + if (!buf->page) { + buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->page) + return -ENOMEM; + buf->page_offset = stmmac_rx_offset(priv); + } - if (priv->sph) { + if (priv->sph && !buf->sec_page) { buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); if (!buf->sec_page) return -ENOMEM; @@ -1320,7 +1445,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); } - buf->addr = page_pool_get_dma_addr(buf->page); + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; + stmmac_set_desc_addr(priv, p, buf->addr); if (priv->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); @@ -1358,7 +1484,8 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - if (tx_q->tx_skbuff_dma[i].buf) { + if (tx_q->tx_skbuff_dma[i].buf && + tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { if (tx_q->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[i].buf, @@ -1371,84 +1498,227 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) DMA_TO_DEVICE); } - if (tx_q->tx_skbuff[i]) { + if (tx_q->xdpf[i] && + 
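Both hardware timestamp paths above apply the same clock-domain-crossing (CDC) correction of 2 * (NSEC_PER_SEC / clk_ptp_rate), added as a negative adjustment on the TX side and subtracted directly on the RX side, so both timestamps end up two PTP clock cycles earlier. A minimal stand-alone sketch of the arithmetic; the 250 MHz PTP clock and the raw timestamp are assumed example values:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors the CDC correction: two cycles of the PTP reference clock */
static uint64_t cdc_adjust_ns(uint32_t clk_ptp_rate)
{
	return 2 * (NSEC_PER_SEC / clk_ptp_rate);
}

int main(void)
{
	uint32_t clk_ptp_rate = 250000000;	/* assumed 250 MHz PTP clock */
	uint64_t raw_rx_ts = 1000000000ULL;	/* invented raw RX timestamp */

	printf("adjust = %llu ns\n",		/* 2 * 4 ns = 8 ns at 250 MHz */
	       (unsigned long long)cdc_adjust_ns(clk_ptp_rate));
	printf("corrected RX ts = %llu ns\n",
	       (unsigned long long)(raw_rx_ts - cdc_adjust_ns(clk_ptp_rate)));
	return 0;
}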
(tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { + xdp_return_frame(tx_q->xdpf[i]); + tx_q->xdpf[i] = NULL; + } + + if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff[i] && + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { dev_kfree_skb_any(tx_q->tx_skbuff[i]); tx_q->tx_skbuff[i] = NULL; - tx_q->tx_skbuff_dma[i].buf = 0; - tx_q->tx_skbuff_dma[i].map_as_page = false; } + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; } /** - * init_dma_rx_desc_rings - init the RX descriptor rings - * @dev: net device structure + * dma_free_rx_skbufs - free RX dma buffers + * @priv: private structure + * @queue: RX queue index + */ +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) +{ + int i; + + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, queue, i); +} + +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, + gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; + + for (i = 0; i < priv->dma_rx_size; i++) { + struct dma_desc *p; + int ret; + + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + ret = stmmac_init_rx_buffers(priv, p, i, flags, + queue); + if (ret) + return ret; + + rx_q->buf_alloc_num++; + } + + return 0; +} + +/** + * dma_free_rx_xskbufs - free RX dma buffers from XSK pool + * @priv: private structure + * @queue: RX queue index + */ +static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; + + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + + if (!buf->xdp) + continue; + + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + } +} + +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; + + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf; + dma_addr_t dma_addr; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + i); + else + p = rx_q->dma_rx + i; + + buf = &rx_q->buf_pool[i]; + + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) + return -ENOMEM; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, p, dma_addr); + rx_q->buf_alloc_num++; + } + + return 0; +} + +static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) +{ + if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) + return NULL; + + return xsk_get_pool_from_qid(priv->dev, queue); +} + +/** + * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) + * @priv: driver private structure + * @queue: RX queue index * @flags: gfp flag. * Description: this function initializes the DMA RX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. 
*/ -static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) { - struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_count = priv->plat->rx_queues_to_use; - int ret = -ENOMEM; - int queue; - int i; + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int ret; - /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, - "SKB addresses:\nskb\t\tskb data\tdma data\n"); + "(%s) dma_rx_phy=0x%08x\n", __func__, + (u32)rx_q->dma_rx_phy); - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + stmmac_clear_rx_descriptors(priv, queue); - netif_dbg(priv, probe, priv->dev, - "(%s) dma_rx_phy=0x%08x\n", __func__, - (u32)rx_q->dma_rx_phy); + xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); - stmmac_clear_rx_descriptors(priv, queue); + rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - for (i = 0; i < priv->dma_rx_size; i++) { - struct dma_desc *p; + if (rx_q->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + netdev_info(priv->dev, + "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", + rx_q->queue_index); + xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rx_q->page_pool)); + netdev_info(priv->dev, + "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", + rx_q->queue_index); + } - if (priv->extend_desc) - p = &((rx_q->dma_erx + i)->basic); - else - p = rx_q->dma_rx + i; + if (rx_q->xsk_pool) { + /* RX XDP ZC buffer pool may not be populated, e.g. + * xdpsock TX-only. + */ + stmmac_alloc_rx_buffers_zc(priv, queue); + } else { + ret = stmmac_alloc_rx_buffers(priv, queue, flags); + if (ret < 0) + return -ENOMEM; + } - ret = stmmac_init_rx_buffers(priv, p, i, flags, - queue); - if (ret) - goto err_init_rx_buffers; - } + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; - rx_q->cur_rx = 0; - rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); - - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) - stmmac_mode_init(priv, rx_q->dma_erx, - rx_q->dma_rx_phy, - priv->dma_rx_size, 1); - else - stmmac_mode_init(priv, rx_q->dma_rx, - rx_q->dma_rx_phy, - priv->dma_rx_size, 0); - } + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, + priv->dma_rx_size, 1); + else + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, + priv->dma_rx_size, 0); + } + + return 0; +} + +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int ret; + + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n"); + + for (queue = 0; queue < rx_count; queue++) { + ret = __init_dma_rx_desc_rings(priv, queue, flags); + if (ret) + goto err_init_rx_buffers; } return 0; err_init_rx_buffers: while (queue >= 0) { - while (--i >= 0) - stmmac_free_rx_buffer(priv, queue, i); + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, queue); + else + dma_free_rx_skbufs(priv, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; if (queue == 0) break; - i = priv->dma_rx_size; queue--; } @@ -1456,63 +1726,75 @@ err_init_rx_buffers: } /** - * init_dma_tx_desc_rings - 
init the TX descriptor rings - * @dev: net device structure. + * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) + * @priv: driver private structure + * @queue : TX queue index * Description: this function initializes the DMA TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ -static int init_dma_tx_desc_rings(struct net_device *dev) +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) { - struct stmmac_priv *priv = netdev_priv(dev); - u32 tx_queue_cnt = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; int i; - for (queue = 0; queue < tx_queue_cnt; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + netif_dbg(priv, probe, priv->dev, + "(%s) dma_tx_phy=0x%08x\n", __func__, + (u32)tx_q->dma_tx_phy); - netif_dbg(priv, probe, priv->dev, - "(%s) dma_tx_phy=0x%08x\n", __func__, - (u32)tx_q->dma_tx_phy); - - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) - stmmac_mode_init(priv, tx_q->dma_etx, - tx_q->dma_tx_phy, - priv->dma_tx_size, 1); - else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) - stmmac_mode_init(priv, tx_q->dma_tx, - tx_q->dma_tx_phy, - priv->dma_tx_size, 0); - } + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, + priv->dma_tx_size, 1); + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, + priv->dma_tx_size, 0); + } - for (i = 0; i < priv->dma_tx_size; i++) { - struct dma_desc *p; - if (priv->extend_desc) - p = &((tx_q->dma_etx + i)->basic); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - p = &((tx_q->dma_entx + i)->basic); - else - p = tx_q->dma_tx + i; + tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - stmmac_clear_desc(priv, p); + for (i = 0; i < priv->dma_tx_size; i++) { + struct dma_desc *p; - tx_q->tx_skbuff_dma[i].buf = 0; - tx_q->tx_skbuff_dma[i].map_as_page = false; - tx_q->tx_skbuff_dma[i].len = 0; - tx_q->tx_skbuff_dma[i].last_segment = false; - tx_q->tx_skbuff[i] = NULL; - } + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &((tx_q->dma_entx + i)->basic); + else + p = tx_q->dma_tx + i; - tx_q->dirty_tx = 0; - tx_q->cur_tx = 0; - tx_q->mss = 0; + stmmac_clear_desc(priv, p); - netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; } + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + + return 0; +} + +static int init_dma_tx_desc_rings(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_queue_cnt; + u32 queue; + + tx_queue_cnt = priv->plat->tx_queues_to_use; + + for (queue = 0; queue < tx_queue_cnt; queue++) + __init_dma_tx_desc_rings(priv, queue); + return 0; } @@ -1544,29 +1826,25 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) } /** - * dma_free_rx_skbufs - free RX dma buffers - * @priv: private structure - * @queue: RX queue index - */ -static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) -{ - int i; - - for (i = 0; i < priv->dma_rx_size; i++) - stmmac_free_rx_buffer(priv, queue, i); -} - -/** * 
dma_free_tx_skbufs - free TX dma buffers * @priv: private structure * @queue: TX queue index */ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; int i; + tx_q->xsk_frames_done = 0; + for (i = 0; i < priv->dma_tx_size; i++) stmmac_free_tx_buffer(priv, queue, i); + + if (tx_q->xsk_pool && tx_q->xsk_frames_done) { + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + tx_q->xsk_frames_done = 0; + tx_q->xsk_pool = NULL; + } } /** @@ -1583,137 +1861,186 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) } /** - * free_dma_rx_desc_resources - free RX dma desc resources + * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) * @priv: private structure + * @queue: RX queue index */ +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + /* Release the DMA RX socket buffers */ + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, queue); + else + dma_free_rx_skbufs(priv, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; + + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + rx_q->dma_rx, rx_q->dma_rx_phy); + else + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + rx_q->dma_erx, rx_q->dma_rx_phy); + + if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) + xdp_rxq_info_unreg(&rx_q->xdp_rxq); + + kfree(rx_q->buf_pool); + if (rx_q->page_pool) + page_pool_destroy(rx_q->page_pool); +} + static void free_dma_rx_desc_resources(struct stmmac_priv *priv) { u32 rx_count = priv->plat->rx_queues_to_use; u32 queue; /* Free RX queue resources */ - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - - /* Release the DMA RX socket buffers */ - dma_free_rx_skbufs(priv, queue); - - /* Free DMA regions of consistent memory previously allocated */ - if (!priv->extend_desc) - dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_desc), - rx_q->dma_rx, rx_q->dma_rx_phy); - else - dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_extended_desc), - rx_q->dma_erx, rx_q->dma_rx_phy); - - kfree(rx_q->buf_pool); - if (rx_q->page_pool) - page_pool_destroy(rx_q->page_pool); - } + for (queue = 0; queue < rx_count; queue++) + __free_dma_rx_desc_resources(priv, queue); } /** - * free_dma_tx_desc_resources - free TX dma desc resources + * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) * @priv: private structure + * @queue: TX queue index */ -static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 tx_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + size_t size; + void *addr; - /* Free TX queue resources */ - for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - size_t size; - void *addr; + /* Release the DMA TX socket buffers */ + dma_free_tx_skbufs(priv, queue); + + if (priv->extend_desc) { + size = sizeof(struct dma_extended_desc); + addr = tx_q->dma_etx; + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + size = sizeof(struct dma_edesc); + addr = tx_q->dma_entx; + } else { + size = sizeof(struct dma_desc); + addr = tx_q->dma_tx; + } - /* Release the DMA TX socket 
buffers */ - dma_free_tx_skbufs(priv, queue); + size *= priv->dma_tx_size; - if (priv->extend_desc) { - size = sizeof(struct dma_extended_desc); - addr = tx_q->dma_etx; - } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { - size = sizeof(struct dma_edesc); - addr = tx_q->dma_entx; - } else { - size = sizeof(struct dma_desc); - addr = tx_q->dma_tx; - } + dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); - size *= priv->dma_tx_size; + kfree(tx_q->tx_skbuff_dma); + kfree(tx_q->tx_skbuff); +} - dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); +static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; - kfree(tx_q->tx_skbuff_dma); - kfree(tx_q->tx_skbuff); - } + /* Free TX queue resources */ + for (queue = 0; queue < tx_count; queue++) + __free_dma_tx_desc_resources(priv, queue); } /** - * alloc_dma_rx_desc_resources - alloc RX resources. + * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). * @priv: private structure + * @queue: RX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + bool xdp_prog = stmmac_xdp_is_enabled(priv); + struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; + unsigned int napi_id; + int ret; + + rx_q->queue_index = queue; + rx_q->priv_data = priv; + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = priv->dma_rx_size; + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); + pp_params.dev = priv->device; + pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + pp_params.offset = stmmac_rx_offset(priv); + pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + + rx_q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_q->page_pool)) { + ret = PTR_ERR(rx_q->page_pool); + rx_q->page_pool = NULL; + return ret; + } + + rx_q->buf_pool = kcalloc(priv->dma_rx_size, + sizeof(*rx_q->buf_pool), + GFP_KERNEL); + if (!rx_q->buf_pool) + return -ENOMEM; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_alloc_coherent(priv->device, + priv->dma_rx_size * + sizeof(struct dma_extended_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_erx) + return -ENOMEM; + + } else { + rx_q->dma_rx = dma_alloc_coherent(priv->device, + priv->dma_rx_size * + sizeof(struct dma_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_rx) + return -ENOMEM; + } + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) + napi_id = ch->rxtx_napi.napi_id; + else + napi_id = ch->rx_napi.napi_id; + + ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, + rx_q->queue_index, + napi_id); + if (ret) { + netdev_err(priv->dev, "Failed to register xdp rxq info\n"); + return -EINVAL; + } + + return 0; +} + static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) { u32 rx_count = priv->plat->rx_queues_to_use; - int ret = -ENOMEM; u32 queue; + int ret; /* RX queues buffers and DMA */ for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - struct page_pool_params pp_params = { 0 }; - unsigned int num_pages; - - rx_q->queue_index = queue; - rx_q->priv_data = priv; - - pp_params.flags = PP_FLAG_DMA_MAP; - pp_params.pool_size = priv->dma_rx_size; - num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); - pp_params.nid = dev_to_node(priv->device); - pp_params.dev = priv->device; - pp_params.dma_dir = DMA_FROM_DEVICE; - - rx_q->page_pool = page_pool_create(&pp_params); - if (IS_ERR(rx_q->page_pool)) { - ret = PTR_ERR(rx_q->page_pool); - rx_q->page_pool = NULL; - goto err_dma; - } - - rx_q->buf_pool = kcalloc(priv->dma_rx_size, - sizeof(*rx_q->buf_pool), - GFP_KERNEL); - if (!rx_q->buf_pool) + ret = __alloc_dma_rx_desc_resources(priv, queue); + if (ret) goto err_dma; - - if (priv->extend_desc) { - rx_q->dma_erx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * - sizeof(struct dma_extended_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); - if (!rx_q->dma_erx) - goto err_dma; - - } else { - rx_q->dma_rx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * - sizeof(struct dma_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); - if (!rx_q->dma_rx) - goto err_dma; - } } return 0; @@ -1725,60 +2052,70 @@ err_dma: } /** - * alloc_dma_tx_desc_resources - alloc TX resources. + * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). * @priv: private structure + * @queue: TX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. 
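When an XDP program is attached, stmmac_rx_offset() reserves XDP_PACKET_HEADROOM in front of every RX buffer, and the same offset is used both as pp_params.offset and when forming buf->addr. A minimal stand-alone sketch of the address math; the 256-byte headroom mirrors the kernel's XDP_PACKET_HEADROOM and the DMA base address is invented:

#include <stdio.h>
#include <stdint.h>

#define XDP_PACKET_HEADROOM 256	/* kernel constant, repeated here for the demo */

int main(void)
{
	uint64_t page_dma = 0x10000000;	/* invented page DMA address */
	int xdp_enabled = 1;

	/* Mirrors stmmac_rx_offset(): headroom only when XDP is enabled */
	uint32_t page_offset = xdp_enabled ? XDP_PACKET_HEADROOM : 0;

	/* Mirrors buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset */
	printf("buf->addr = 0x%llx\n",
	       (unsigned long long)(page_dma + page_offset));
	return 0;
}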
*/ -static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 tx_count = priv->plat->tx_queues_to_use; - int ret = -ENOMEM; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + size_t size; + void *addr; - /* TX queues buffers and DMA */ - for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - size_t size; - void *addr; + tx_q->queue_index = queue; + tx_q->priv_data = priv; - tx_q->queue_index = queue; - tx_q->priv_data = priv; + tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, + sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) + return -ENOMEM; - tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, - sizeof(*tx_q->tx_skbuff_dma), - GFP_KERNEL); - if (!tx_q->tx_skbuff_dma) - goto err_dma; + tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) + return -ENOMEM; - tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, - sizeof(struct sk_buff *), - GFP_KERNEL); - if (!tx_q->tx_skbuff) - goto err_dma; + if (priv->extend_desc) + size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + size = sizeof(struct dma_edesc); + else + size = sizeof(struct dma_desc); - if (priv->extend_desc) - size = sizeof(struct dma_extended_desc); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - size = sizeof(struct dma_edesc); - else - size = sizeof(struct dma_desc); + size *= priv->dma_tx_size; - size *= priv->dma_tx_size; + addr = dma_alloc_coherent(priv->device, size, + &tx_q->dma_tx_phy, GFP_KERNEL); + if (!addr) + return -ENOMEM; - addr = dma_alloc_coherent(priv->device, size, - &tx_q->dma_tx_phy, GFP_KERNEL); - if (!addr) - goto err_dma; + if (priv->extend_desc) + tx_q->dma_etx = addr; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_q->dma_entx = addr; + else + tx_q->dma_tx = addr; - if (priv->extend_desc) - tx_q->dma_etx = addr; - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - tx_q->dma_entx = addr; - else - tx_q->dma_tx = addr; + return 0; +} + +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + int ret; + + /* TX queues buffers and DMA */ + for (queue = 0; queue < tx_count; queue++) { + ret = __alloc_dma_tx_desc_resources(priv, queue); + if (ret) + goto err_dma; } return 0; @@ -1815,11 +2152,13 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) */ static void free_dma_desc_resources(struct stmmac_priv *priv) { - /* Release the DMA RX socket buffers */ - free_dma_rx_desc_resources(priv); - /* Release the DMA TX socket buffers */ free_dma_tx_desc_resources(priv); + + /* Release the DMA RX socket buffers later + * to ensure all pending XDP_TX buffers are returned. 
+ */ + free_dma_rx_desc_resources(priv); } /** @@ -1976,12 +2315,24 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) /* configure all channels */ for (chan = 0; chan < rx_channels_count; chan++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + u32 buf_size; + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); - stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, - chan); + + if (rx_q->xsk_pool) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + chan); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + chan); + } } for (chan = 0; chan < tx_channels_count; chan++) { @@ -1992,6 +2343,101 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) } } +static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct xsk_buff_pool *pool = tx_q->xsk_pool; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc = NULL; + struct xdp_desc xdp_desc; + bool work_done = true; + + /* Avoids TX time-out as we are sharing with slow path */ + nq->trans_start = jiffies; + + budget = min(budget, stmmac_tx_avail(priv, queue)); + + while (budget-- > 0) { + dma_addr_t dma_addr; + bool set_ic; + + /* We are sharing with slow path and stop XSK TX desc submission when + * available TX ring is less than threshold. + */ + if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || + !netif_carrier_ok(priv->dev)) { + work_done = false; + break; + } + + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; + + /* To return XDP buffer to XSK pool, we simple call + * xsk_tx_completed(), so we don't need to fill up + * 'buf' and 'xdpf'. 
+ */ + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->xdpf[entry] = NULL; + + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + tx_q->tx_count_frames++; + + if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, + true, priv->mode, true, true, + xdp_desc.len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + entry = tx_q->cur_tx; + } + + if (tx_desc) { + stmmac_flush_tx_descriptors(priv, queue); + xsk_tx_release(pool); + } + + /* Return true if all of the 3 conditions are met + * a) TX Budget is still available + * b) work_done = true when XSK TX desc peek is empty (no more + * pending XSK TX for transmission) + */ + return !!budget && work_done; +} + /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure @@ -2003,18 +2449,35 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry, count = 0; + unsigned int entry, xmits = 0, count = 0; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); priv->xstats.tx_clean++; + tx_q->xsk_frames_done = 0; + entry = tx_q->dirty_tx; - while ((entry != tx_q->cur_tx) && (count < budget)) { - struct sk_buff *skb = tx_q->tx_skbuff[entry]; + + /* Try to clean all TX complete frame in 1 shot */ + while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) { + struct xdp_frame *xdpf; + struct sk_buff *skb; struct dma_desc *p; int status; + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { + xdpf = tx_q->xdpf[entry]; + skb = NULL; + } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + xdpf = NULL; + skb = tx_q->tx_skbuff[entry]; + } else { + xdpf = NULL; + skb = NULL; + } + if (priv->extend_desc) p = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -2044,10 +2507,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; } - stmmac_get_tx_hwtstamp(priv, p, skb); + if (skb) + stmmac_get_tx_hwtstamp(priv, p, skb); } - if (likely(tx_q->tx_skbuff_dma[entry].buf)) { + if (likely(tx_q->tx_skbuff_dma[entry].buf && + tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { if (tx_q->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[entry].buf, @@ -2068,11 +2533,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) tx_q->tx_skbuff_dma[entry].last_segment = false; tx_q->tx_skbuff_dma[entry].is_jumbo = false; - if (likely(skb != NULL)) { - pkts_compl++; - bytes_compl += skb->len; - dev_consume_skb_any(skb); - tx_q->tx_skbuff[entry] = NULL; + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == 
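stmmac_xdp_xmit_zc() above only sets the interrupt-on-completion bit on every tx_coal_frames[queue]-th descriptor, the same modulo test used on the regular TX path. A minimal stand-alone sketch of that decision; the coalesce threshold and frame count are invented example numbers:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
	uint32_t tx_coal_frames = 4;	/* assumed per-queue coalesce threshold */
	uint32_t tx_count_frames = 0;

	/* Mirrors the set_ic decision: request an IRQ every tx_coal_frames frames */
	for (int frame = 0; frame < 10; frame++) {
		bool set_ic;

		tx_count_frames++;
		if (!tx_coal_frames)
			set_ic = false;
		else if (tx_count_frames % tx_coal_frames == 0)
			set_ic = true;
		else
			set_ic = false;

		if (set_ic)
			tx_count_frames = 0;	/* counter restarts after an IC descriptor */

		printf("frame %d: set_ic=%d\n", frame, set_ic);
	}
	return 0;
}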
STMMAC_TXBUF_T_XDP_NDO) { + xdp_return_frame(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + if (likely(skb)) { + pkts_compl++; + bytes_compl += skb->len; + dev_consume_skb_any(skb); + tx_q->tx_skbuff[entry] = NULL; + } } stmmac_release_tx_desc(priv, p, priv->mode); @@ -2093,6 +2575,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); } + if (tx_q->xsk_pool) { + bool work_done; + + if (tx_q->xsk_frames_done) + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_q->xsk_pool)) + xsk_set_tx_need_wakeup(tx_q->xsk_pool); + + /* For XSK TX, we try to send as many as possible. + * If XSK work done (XSK TX desc empty and budget still + * available), return "budget - 1" to reenable TX IRQ. + * Else, return "budget" to make NAPI continue polling. + */ + work_done = stmmac_xdp_xmit_zc(priv, queue, + STMMAC_XSK_TX_BUDGET_MAX); + if (work_done) + xmits = budget - 1; + else + xmits = budget; + } + if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) { stmmac_enable_eee_mode(priv); @@ -2101,12 +2605,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), HRTIMER_MODE_REL); __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); - return count; + /* Combine decisions from TX clean and XSK TX */ + return max(count, xmits); } /** @@ -2184,28 +2690,35 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) return false; } -static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) +static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) { int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); + &priv->xstats, chan, dir); + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_channel *ch = &priv->channel[chan]; + struct napi_struct *rx_napi; + struct napi_struct *tx_napi; unsigned long flags; + rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; + tx_napi = tx_q->xsk_pool ? 
&ch->rxtx_napi : &ch->tx_napi; + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { - if (napi_schedule_prep(&ch->rx_napi)) { + if (napi_schedule_prep(rx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->rx_napi); + __napi_schedule(rx_napi); } } if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { - if (napi_schedule_prep(&ch->tx_napi)) { + if (napi_schedule_prep(tx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->tx_napi); + __napi_schedule(tx_napi); } } @@ -2233,7 +2746,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) channels_to_check = ARRAY_SIZE(status); for (chan = 0; chan < channels_to_check; chan++) - status[chan] = stmmac_napi_check(priv, chan); + status[chan] = stmmac_napi_check(priv, chan, + DMA_DIR_RXTX); for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { @@ -2361,7 +2875,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) rx_q->dma_rx_phy, chan); rx_q->rx_tail_addr = rx_q->dma_rx_phy + - (priv->dma_rx_size * + (rx_q->buf_alloc_num * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, chan); @@ -2386,7 +2900,8 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), HRTIMER_MODE_REL); } @@ -2401,16 +2916,18 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); struct stmmac_priv *priv = tx_q->priv_data; struct stmmac_channel *ch; + struct napi_struct *napi; ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? 
&ch->rxtx_napi : &ch->tx_napi; - if (likely(napi_schedule_prep(&ch->tx_napi))) { + if (likely(napi_schedule_prep(napi))) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->tx_napi); + __napi_schedule(napi); } return HRTIMER_NORESTART; @@ -2427,18 +2944,21 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) static void stmmac_init_coalesce(struct stmmac_priv *priv) { u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; u32 chan; - priv->tx_coal_frames = STMMAC_TX_FRAMES; - priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - priv->rx_coal_frames = STMMAC_RX_FRAMES; - for (chan = 0; chan < tx_channel_count; chan++) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; + priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); tx_q->txtimer.function = stmmac_tx_timer; } + + for (chan = 0; chan < rx_channel_count; chan++) + priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; } static void stmmac_set_rings_length(struct stmmac_priv *priv) @@ -2655,6 +3175,26 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) } } +static int stmmac_fpe_start_wq(struct stmmac_priv *priv) +{ + char *name; + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); + + name = priv->wq_name; + sprintf(name, "%s-fpe", priv->dev->name); + + priv->fpe_wq = create_singlethread_workqueue(name); + if (!priv->fpe_wq) { + netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); + + return -ENOMEM; + } + netdev_info(priv->dev, "FPE workqueue start"); + + return 0; +} + /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. @@ -2673,6 +3213,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; + bool sph_en; u32 chan; int ret; @@ -2743,10 +3284,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->tx_lpi_timer = eee_timer * 1000; if (priv->use_riwt) { - if (!priv->rx_riwt) - priv->rx_riwt = DEF_DMA_RIWT; + u32 queue; - ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + for (queue = 0; queue < rx_cnt; queue++) { + if (!priv->rx_riwt[queue]) + priv->rx_riwt[queue] = DEF_DMA_RIWT; + + stmmac_rx_watchdog(priv, priv->ioaddr, + priv->rx_riwt[queue], queue); + } } if (priv->hw->pcs) @@ -2757,15 +3303,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Enable TSO */ if (priv->tso) { - for (chan = 0; chan < tx_cnt; chan++) + for (chan = 0; chan < tx_cnt; chan++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + + /* TSO and TBS cannot co-exist */ + if (tx_q->tbs & STMMAC_TBS_AVAIL) + continue; + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); + } } /* Enable Split Header */ - if (priv->sph && priv->hw->rx_csum) { - for (chan = 0; chan < rx_cnt; chan++) - stmmac_enable_sph(priv, priv->ioaddr, 1, chan); - } + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < rx_cnt; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + /* VLAN Tag Insertion */ if (priv->dma_cap.vlins) @@ -2786,6 +3339,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Start the ball rolling... 
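Several hunks above pick one of three per-channel NAPI contexts (RX-only, TX-only, or the combined RX/TX instance) depending on whether an XSK pool is attached to the queue. A standalone sketch of that selection; the struct and field names are simplified stand-ins rather than the driver's types.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the per-channel NAPI instances. */
struct channel {
    const char *rx_napi;
    const char *tx_napi;
    const char *rxtx_napi;
};

/* With an AF_XDP zero-copy pool attached, RX and TX completion are both
 * handled by the combined rxtx instance; otherwise the dedicated
 * per-direction instance is scheduled.
 */
static const char *pick_rx_napi(const struct channel *ch, bool has_xsk_pool)
{
    return has_xsk_pool ? ch->rxtx_napi : ch->rx_napi;
}

static const char *pick_tx_napi(const struct channel *ch, bool has_xsk_pool)
{
    return has_xsk_pool ? ch->rxtx_napi : ch->tx_napi;
}

int main(void)
{
    struct channel ch = { "rx_napi", "tx_napi", "rxtx_napi" };

    printf("copy mode:  rx=%s tx=%s\n",
           pick_rx_napi(&ch, false), pick_tx_napi(&ch, false));
    printf("zero-copy:  rx=%s tx=%s\n",
           pick_rx_napi(&ch, true), pick_tx_napi(&ch, true));
    return 0;
}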
*/ stmmac_start_all_dma(priv); + if (priv->dma_cap.fpesel) { + stmmac_fpe_start_wq(priv); + + if (priv->plat->fpe_cfg->enable) + stmmac_fpe_handshake(priv, true); + } + return 0; } @@ -2796,6 +3356,271 @@ static void stmmac_hw_teardown(struct net_device *dev) clk_disable_unprepare(priv->plat->clk_ptp_ref); } +static void stmmac_free_irq(struct net_device *dev, + enum request_irq_err irq_err, int irq_idx) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int j; + + switch (irq_err) { + case REQ_IRQ_ERR_ALL: + irq_idx = priv->plat->tx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_TX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->tx_irq[j] > 0) { + irq_set_affinity_hint(priv->tx_irq[j], NULL); + free_irq(priv->tx_irq[j], &priv->tx_queue[j]); + } + } + irq_idx = priv->plat->rx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_RX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->rx_irq[j] > 0) { + irq_set_affinity_hint(priv->rx_irq[j], NULL); + free_irq(priv->rx_irq[j], &priv->rx_queue[j]); + } + } + + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) + free_irq(priv->sfty_ue_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_UE: + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) + free_irq(priv->sfty_ce_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_CE: + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) + free_irq(priv->lpi_irq, dev); + fallthrough; + case REQ_IRQ_ERR_LPI: + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); + fallthrough; + case REQ_IRQ_ERR_WOL: + free_irq(dev->irq, dev); + fallthrough; + case REQ_IRQ_ERR_MAC: + case REQ_IRQ_ERR_NO: + /* If MAC IRQ request error, no more IRQ to free */ + break; + } +} + +static int stmmac_request_irq_multi_msi(struct net_device *dev) +{ + enum request_irq_err irq_err = REQ_IRQ_ERR_NO; + struct stmmac_priv *priv = netdev_priv(dev); + cpumask_t cpu_mask; + int irq_idx = 0; + char *int_name; + int ret; + int i; + + /* For common interrupt */ + int_name = priv->int_name_mac; + sprintf(int_name, "%s:%s", dev->name, "mac"); + ret = request_irq(dev->irq, stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc mac MSI %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + goto irq_error; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + int_name = priv->int_name_wol; + sprintf(int_name, "%s:%s", dev->name, "wol"); + ret = request_irq(priv->wol_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc wol MSI %d (error: %d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + goto irq_error; + } + } + + /* Request the LPI IRQ in case of another line + * is used for LPI + */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + int_name = priv->int_name_lpi; + sprintf(int_name, "%s:%s", dev->name, "lpi"); + ret = request_irq(priv->lpi_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc lpi MSI %d (error: %d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + /* Request the Safety Feature Correctible Error line in + * case of another line is used + */ + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { + int_name = priv->int_name_sfty_ce; + sprintf(int_name, "%s:%s", dev->name, "safety-ce"); + ret = request_irq(priv->sfty_ce_irq, + 
stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ce MSI %d (error: %d)\n", + __func__, priv->sfty_ce_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_CE; + goto irq_error; + } + } + + /* Request the Safety Feature Uncorrectible Error line in + * case of another line is used + */ + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { + int_name = priv->int_name_sfty_ue; + sprintf(int_name, "%s:%s", dev->name, "safety-ue"); + ret = request_irq(priv->sfty_ue_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ue MSI %d (error: %d)\n", + __func__, priv->sfty_ue_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_UE; + goto irq_error; + } + } + + /* Request Rx MSI irq */ + for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (priv->rx_irq[i] == 0) + continue; + + int_name = priv->int_name_rx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); + ret = request_irq(priv->rx_irq[i], + stmmac_msi_intr_rx, + 0, int_name, &priv->rx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc rx-%d MSI %d (error: %d)\n", + __func__, i, priv->rx_irq[i], ret); + irq_err = REQ_IRQ_ERR_RX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); + } + + /* Request Tx MSI irq */ + for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (priv->tx_irq[i] == 0) + continue; + + int_name = priv->int_name_tx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); + ret = request_irq(priv->tx_irq[i], + stmmac_msi_intr_tx, + 0, int_name, &priv->tx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc tx-%d MSI %d (error: %d)\n", + __func__, i, priv->tx_irq[i], ret); + irq_err = REQ_IRQ_ERR_TX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, irq_idx); + return ret; +} + +static int stmmac_request_irq_single(struct net_device *dev) +{ + enum request_irq_err irq_err = REQ_IRQ_ERR_NO; + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + ret = request_irq(dev->irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + return ret; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + ret = request_irq(priv->wol_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the WoL IRQ %d (%d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + return ret; + } + } + + /* Request the IRQ lines */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + ret = request_irq(priv->lpi_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, 0); + return ret; +} + +static int stmmac_request_irq(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + /* 
Request the IRQ lines */ + if (priv->plat->multi_msi_en) + ret = stmmac_request_irq_multi_msi(dev); + else + ret = stmmac_request_irq_single(dev); + + return ret; +} + /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. @@ -2805,22 +3630,28 @@ static void stmmac_hw_teardown(struct net_device *dev) * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ -static int stmmac_open(struct net_device *dev) +int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); int bfsize = 0; u32 chan; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI && - priv->hw->xpcs == NULL) { + priv->hw->xpcs_args.an_mode != DW_AN_C73) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, "%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - return ret; + goto init_phy_error; } } @@ -2850,9 +3681,8 @@ static int stmmac_open(struct net_device *dev) struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; + /* Setup per-TXQ tbs flag before TX descriptor alloc */ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; - if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan)) - tx_q->tbs &= ~STMMAC_TBS_AVAIL; } ret = alloc_dma_desc_resources(priv); @@ -2881,50 +3711,15 @@ static int stmmac_open(struct net_device *dev) /* We may have called phylink_speed_down before */ phylink_speed_up(priv->phylink); - /* Request the IRQ lines */ - ret = request_irq(dev->irq, stmmac_interrupt, - IRQF_SHARED, dev->name, dev); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: ERROR: allocating the IRQ %d (error: %d)\n", - __func__, dev->irq, ret); + ret = stmmac_request_irq(dev); + if (ret) goto irq_error; - } - - /* Request the Wake IRQ in case of another line is used for WoL */ - if (priv->wol_irq != dev->irq) { - ret = request_irq(priv->wol_irq, stmmac_interrupt, - IRQF_SHARED, dev->name, dev); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: ERROR: allocating the WoL IRQ %d (%d)\n", - __func__, priv->wol_irq, ret); - goto wolirq_error; - } - } - - /* Request the IRQ lines */ - if (priv->lpi_irq > 0) { - ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, - dev->name, dev); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: ERROR: allocating the LPI IRQ %d (%d)\n", - __func__, priv->lpi_irq, ret); - goto lpiirq_error; - } - } stmmac_enable_all_queues(priv); netif_tx_start_all_queues(priv->dev); return 0; -lpiirq_error: - if (priv->wol_irq != dev->irq) - free_irq(priv->wol_irq, dev); -wolirq_error: - free_irq(dev->irq, dev); irq_error: phylink_stop(priv->phylink); @@ -2936,16 +3731,28 @@ init_error: free_dma_desc_resources(priv); dma_desc_error: phylink_disconnect_phy(priv->phylink); +init_phy_error: + pm_runtime_put(priv->device); return ret; } +static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) +{ + set_bit(__FPE_REMOVING, &priv->fpe_task_state); + + if (priv->fpe_wq) + destroy_workqueue(priv->fpe_wq); + + netdev_info(priv->dev, "FPE workqueue stop"); +} + /** * stmmac_release - close entry point of the driver * @dev : device pointer. * Description: * This is the stop entry point of the driver. 
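The IRQ request paths above unwind partially acquired lines by recording how far they got and letting stmmac_free_irq() fall through a switch from that stage downwards. A compact model of the staged-unwind idiom, with invented stage names and the stages treated as all-or-nothing for brevity:

#include <stdio.h>

/* Which request stage failed; ERR_ALL means a full teardown. */
enum stage { ERR_NONE, ERR_MAC, ERR_WOL, ERR_TX, ERR_ALL };

/* Free everything acquired before the failing stage.  Each case falls
 * through to the releases below it, so one switch serves both the
 * error unwind and the full teardown.
 */
static void unwind(enum stage failed)
{
    switch (failed) {
    case ERR_ALL:
        printf("free tx irqs\n");
        /* fall through */
    case ERR_TX:
        printf("free wol irq\n");
        /* fall through */
    case ERR_WOL:
        printf("free mac irq\n");
        /* fall through */
    case ERR_MAC:       /* the MAC request itself failed: nothing held yet */
    case ERR_NONE:
        break;
    }
}

int main(void)
{
    printf("request mac irq: ok\n");
    printf("request wol irq: ok\n");
    printf("request tx irqs: failed\n");
    unwind(ERR_TX);     /* releases the WoL and MAC lines already held */

    printf("\nfull teardown:\n");
    unwind(ERR_ALL);
    return 0;
}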
*/ -static int stmmac_release(struct net_device *dev) +int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 chan; @@ -2962,11 +3769,7 @@ static int stmmac_release(struct net_device *dev) hrtimer_cancel(&priv->tx_queue[chan].txtimer); /* Free the IRQ lines */ - free_irq(dev->irq, dev); - if (priv->wol_irq != dev->irq) - free_irq(priv->wol_irq, dev); - if (priv->lpi_irq > 0) - free_irq(priv->lpi_irq, dev); + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); if (priv->eee_enabled) { priv->tx_path_in_lpi_mode = false; @@ -2986,6 +3789,11 @@ static int stmmac_release(struct net_device *dev) stmmac_release_ptp(priv); + pm_runtime_put(priv->device); + + if (priv->dma_cap.fpesel) + stmmac_fpe_stop_wq(priv); + return 0; } @@ -3071,6 +3879,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, } } +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + int desc_size; + + if (likely(priv->extend_desc)) + desc_size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc_size = sizeof(struct dma_edesc); + else + desc_size = sizeof(struct dma_desc); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + wmb(); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); +} + /** * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) * @skb : the socket buffer @@ -3102,10 +3932,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); - int desc_size, tmp_pay_len = 0, first_tx; int nfrags = skb_shinfo(skb)->nr_frags; u32 queue = skb_get_queue_mapping(skb); unsigned int first_entry, tx_packets; + int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; @@ -3187,6 +4017,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; if (priv->dma_cap.addr64 <= 32) { first->des0 = cpu_to_le32(des); @@ -3222,12 +4054,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; } tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; /* Only the last descriptor gets to point to the skb. 
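stmmac_flush_tx_descriptors() above centralises the publish step of the transmit paths: pick the per-descriptor stride, issue a write barrier so the descriptor updates are visible before the DMA engine is poked, then write a tail pointer derived from the ring base and the current cursor. A small sketch of the address arithmetic, with made-up types, sizes and addresses:

#include <stdio.h>
#include <stdint.h>

/* Illustrative descriptor layouts; only their sizes matter here. */
struct dma_desc          { uint32_t des[4]; };
struct dma_extended_desc { struct dma_desc basic; uint32_t ext[4]; };

/* Tail pointer = ring base + cursor * per-descriptor stride.  In the
 * driver a wmb() precedes the hardware write so every descriptor update
 * (including the OWN bit) is visible to the DMA engine first.
 */
static uint64_t tail_addr(uint64_t ring_base, unsigned int cur, size_t desc_size)
{
    return ring_base + (uint64_t)cur * desc_size;
}

int main(void)
{
    uint64_t base = 0x40000000ULL;   /* made-up DMA address of the ring */
    unsigned int cur = 5;

    printf("basic descriptors:    tail = 0x%llx\n",
           (unsigned long long)tail_addr(base, cur, sizeof(struct dma_desc)));
    printf("extended descriptors: tail = 0x%llx\n",
           (unsigned long long)tail_addr(base, cur, sizeof(struct dma_extended_desc)));
    return 0;
}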
*/ tx_q->tx_skbuff[tx_q->cur_tx] = skb; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; /* Manage tx mitigation */ tx_packets = (tx_q->cur_tx + 1) - first_tx; @@ -3235,11 +4069,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; - else if (!priv->tx_coal_frames) + else if (!priv->tx_coal_frames[queue]) set_ic = false; - else if (tx_packets > priv->tx_coal_frames) + else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; - else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; @@ -3302,12 +4137,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_tx_owner(priv, mss_desc); } - /* The own bit must be the latest setting done when prepare the - * descriptor and then barrier is needed to make sure that - * all is coherent before granting the DMA engine. - */ - wmb(); - if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, @@ -3318,13 +4147,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - if (tx_q->tbs & STMMAC_TBS_AVAIL) - desc_size = sizeof(struct dma_edesc); - else - desc_size = sizeof(struct dma_desc); - - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); - stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3354,10 +4177,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; int gso = skb_shinfo(skb)->gso_type; struct dma_edesc *tbs_desc = NULL; - int entry, desc_size, first_tx; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; + int entry, first_tx; dma_addr_t des; tx_q = &priv->tx_queue[queue]; @@ -3445,6 +4268,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[entry].map_as_page = true; tx_q->tx_skbuff_dma[entry].len = len; tx_q->tx_skbuff_dma[entry].last_segment = last_segment; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; /* Prepare the descriptor and set the own bit too */ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, @@ -3453,6 +4277,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[entry] = skb; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; /* According to the coalesce parameter the IC bit for the latest * segment is reset and the timer re-started to clean the tx status. 
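With the coalescing parameters now kept per queue, both transmit paths decide whether a batch should raise a completion interrupt with the same tiered test. A self-contained model of that decision; the 25-frame threshold and parameter names are illustrative:

#include <stdio.h>
#include <stdbool.h>

/* Decide whether a descriptor should carry the Interrupt-on-Completion
 * bit: timestamped frames always interrupt, coalescing disabled never
 * does, and otherwise an interrupt is raised roughly once every
 * 'coal_frames' queued frames.
 */
static bool want_tx_irq(bool hw_tstamp, unsigned int coal_frames,
                        unsigned int tx_packets, unsigned int count_frames)
{
    if (hw_tstamp)
        return true;
    if (!coal_frames)
        return false;
    if (tx_packets > coal_frames)
        return true;
    return (count_frames % coal_frames) < tx_packets;
}

int main(void)
{
    unsigned int coal = 25;          /* illustrative per-queue value */
    unsigned int count = 0;

    for (unsigned int pkt = 1; pkt <= 100; pkt++) {
        count++;
        if (want_tx_irq(false, coal, 1, count)) {
            printf("IRQ requested after frame %u\n", pkt);
            count = 0;               /* counter resets when IC is set */
        }
    }
    return 0;
}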
@@ -3464,11 +4289,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; - else if (!priv->tx_coal_frames) + else if (!priv->tx_coal_frames[queue]) set_ic = false; - else if (tx_packets > priv->tx_coal_frames) + else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; - else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; @@ -3530,6 +4356,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; stmmac_set_desc_addr(priv, first, des); @@ -3558,25 +4386,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_tx_owner(priv, first); - /* The own bit must be the latest setting done when prepare the - * descriptor and then barrier is needed to make sure that - * all is coherent before granting the DMA engine. - */ - wmb(); - netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr); - if (likely(priv->extend_desc)) - desc_size = sizeof(struct dma_extended_desc); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - desc_size = sizeof(struct dma_edesc); - else - desc_size = sizeof(struct dma_desc); - - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); - stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3619,11 +4433,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - int len, dirty = stmmac_rx_dirty(priv, queue); + int dirty = stmmac_rx_dirty(priv, queue); unsigned int entry = rx_q->dirty_rx; - len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; - while (dirty-- > 0) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; struct dma_desc *p; @@ -3646,18 +4458,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) break; buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); - - dma_sync_single_for_device(priv->device, buf->sec_addr, - len, DMA_FROM_DEVICE); } - buf->addr = page_pool_get_dma_addr(buf->page); - - /* Sync whole allocation to device. This will invalidate old - * data. 
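The refill hunk above now programs the RX descriptor with the page's DMA address plus a per-buffer page offset, so frames land after the headroom an XDP program may want to grow into, and the unconditional dma_sync calls are dropped in favour of syncing where the length is known. A toy model of the offset arithmetic, with made-up sizes and addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE    4096u
#define XDP_HEADROOM  256u   /* illustrative; not the driver's exact headroom */

int main(void)
{
    uint64_t page_dma = 0x7f000000ULL;        /* made-up DMA address of the page */
    unsigned int page_offset = XDP_HEADROOM;  /* where received data starts      */

    /* The RX descriptor is programmed with the offset address, so the NIC
     * writes the frame after the reserved headroom and xdp_adjust_head()
     * can still grow the packet towards the start of the page.
     */
    uint64_t rx_dma_addr = page_dma + page_offset;

    printf("page dma 0x%llx, data dma 0x%llx, headroom %u bytes, payload room %u bytes\n",
           (unsigned long long)page_dma, (unsigned long long)rx_dma_addr,
           page_offset, PAGE_SIZE - page_offset);
    return 0;
}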
- */ - dma_sync_single_for_device(priv->device, buf->addr, len, - DMA_FROM_DEVICE); + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); if (priv->sph) @@ -3667,11 +4470,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) stmmac_refill_desc3(priv, rx_q, p); rx_q->rx_count_frames++; - rx_q->rx_count_frames += priv->rx_coal_frames; - if (rx_q->rx_count_frames > priv->rx_coal_frames) + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) rx_q->rx_count_frames = 0; - use_rx_wd = !priv->rx_coal_frames; + use_rx_wd = !priv->rx_coal_frames[queue]; use_rx_wd |= rx_q->rx_count_frames > 0; if (!priv->use_riwt) use_rx_wd = false; @@ -3736,6 +4539,487 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, return plen - len; } +static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, + struct xdp_frame *xdpf, bool dma_map) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc; + dma_addr_t dma_addr; + bool set_ic; + + if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) + return STMMAC_XDP_CONSUMED; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + if (dma_map) { + dma_addr = dma_map_single(priv->device, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) + return STMMAC_XDP_CONSUMED; + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; + } else { + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + + xdpf->headroom; + dma_sync_single_for_device(priv->device, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; + } + + tx_q->tx_skbuff_dma[entry].buf = dma_addr; + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdpf->len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + tx_q->xdpf[entry] = xdpf; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, + true, priv->mode, true, true, + xdpf->len); + + tx_q->tx_count_frames++; + + if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + tx_q->cur_tx = entry; + + return STMMAC_XDP_TX; +} + +static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= priv->plat->tx_queues_to_use) + index -= priv->plat->tx_queues_to_use; + + return index; +} + +static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int queue; + int res; + + if (unlikely(!xdpf)) + return STMMAC_XDP_CONSUMED; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with 
slow path */ + nq->trans_start = jiffies; + + res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); + if (res == STMMAC_XDP_TX) + stmmac_flush_tx_descriptors(priv, queue); + + __netif_tx_unlock(nq); + + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. */ +static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act; + int res; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + res = STMMAC_XDP_PASS; + break; + case XDP_TX: + res = stmmac_xdp_xmit_back(priv, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(priv->dev, xdp, prog) < 0) + res = STMMAC_XDP_CONSUMED; + else + res = STMMAC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->dev, prog, act); + fallthrough; + case XDP_DROP: + res = STMMAC_XDP_CONSUMED; + break; + } + + return res; +} + +static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + rcu_read_lock(); + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + res = STMMAC_XDP_PASS; + goto unlock; + } + + res = __stmmac_xdp_run_prog(priv, prog, xdp); +unlock: + rcu_read_unlock(); + return ERR_PTR(-res); +} + +static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, + int xdp_status) +{ + int cpu = smp_processor_id(); + int queue; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + + if (xdp_status & STMMAC_XDP_TX) + stmmac_tx_timer_arm(priv, queue); + + if (xdp_status & STMMAC_XDP_REDIRECT) + xdp_do_flush(); +} + +static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, + struct dma_desc *p, struct dma_desc *np, + struct xdp_buff *xdp) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int len = xdp->data_end - xdp->data; + enum pkt_hash_types hash_type; + int coe = priv->hw->rx_csum; + struct sk_buff *skb; + u32 hash; + + skb = stmmac_construct_skb_zc(ch, xdp); + if (!skb) { + priv->dev->stats.rx_dropped++; + return; + } + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rxtx_napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; +} + +static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int entry = rx_q->dirty_rx; + struct dma_desc *rx_desc = NULL; + bool ret = true; + + budget = min(budget, stmmac_rx_dirty(priv, queue)); + + while (budget-- > 0 && entry != rx_q->cur_rx) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + dma_addr_t dma_addr; 
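stmmac_xdp_run_prog() above returns either a real sk_buff pointer or the XDP verdict folded into an error pointer via ERR_PTR(-res), which the RX loop later recovers with -PTR_ERR(). A userspace sketch of that encode/decode trick, using stand-ins for the kernel helpers and illustrative verdict flags:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
static void *err_ptr(long error)      { return (void *)(intptr_t)error; }
static long  ptr_err(const void *ptr) { return (long)(intptr_t)ptr; }
static bool  is_err(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-4095; }

/* Illustrative verdict flags, in the spirit of the STMMAC_XDP_* values. */
#define XDP_RES_PASS      0x1
#define XDP_RES_CONSUMED  0x2
#define XDP_RES_TX        0x4

int main(void)
{
    /* No skb is built for this verdict: the result rides in the pointer. */
    void *skb = err_ptr(-XDP_RES_TX);

    if (is_err(skb)) {
        unsigned int res = (unsigned int)-ptr_err(skb);

        if (res & XDP_RES_TX)
            printf("frame recycled to the XDP TX path (verdict 0x%x)\n", res);
    } else {
        printf("got a real skb, continue the normal RX path\n");
    }
    return 0;
}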
+ bool use_rx_wd; + + if (!buf->xdp) { + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) { + ret = false; + break; + } + } + + if (priv->extend_desc) + rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); + else + rx_desc = rx_q->dma_rx + entry; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, rx_desc, dma_addr); + stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); + stmmac_refill_desc3(priv, rx_q, rx_desc); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); + } + + if (rx_desc) { + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); + } + + return ret; +} + +static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int count = 0, error = 0, len = 0; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int next_entry = rx_q->cur_rx; + unsigned int desc_size; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + int status = 0; + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + struct stmmac_rx_buffer *buf; + unsigned int buf1_len = 0; + struct dma_desc *np, *p; + int entry; + int res; + + if (!count && rx_q->state_saved) { + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + error = 0; + len = 0; + } + + if (count >= limit) + break; + +read_again: + buf1_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (dirty >= STMMAC_RX_FILL_BATCH) { + failure = failure || + !stmmac_rx_refill_zc(priv, queue, dirty); + dirty = 0; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) + break; + + /* Prefetch the next RX descriptor */ + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, + rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + count++; + continue; + } + + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + 
break; + + /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ + if (likely(status & rx_not_ls)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + count++; + goto read_again; + } + + /* XDP ZC Frame only support primary buffers for now */ + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 + * Type frames (LLC/LLC-SNAP) + * + * llc_snap is never checked in GMAC >= 4, so this ACS + * feature is always disabled and packets need to be + * stripped manually. + */ + if (likely(!(status & rx_not_ls)) && + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap))) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + + /* RX buffer is good and fit into a XSK pool buffer */ + buf->xdp->data_end = buf->xdp->data + buf1_len; + xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); + + rcu_read_lock(); + prog = READ_ONCE(priv->xdp_prog); + res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); + rcu_read_unlock(); + + switch (res) { + case STMMAC_XDP_PASS: + stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); + xsk_buff_free(buf->xdp); + break; + case STMMAC_XDP_CONSUMED: + xsk_buff_free(buf->xdp); + priv->dev->stats.rx_dropped++; + break; + case STMMAC_XDP_TX: + case STMMAC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + buf->xdp = NULL; + dirty++; + count++; + } + + if (status & rx_not_ls) { + rx_q->state_saved = true; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { + if (failure || stmmac_rx_dirty(priv, queue) > 0) + xsk_set_rx_need_wakeup(rx_q->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_q->xsk_pool); + + return (int)count; + } + + return failure ? limit : (int)count; +} + /** * stmmac_rx - manage the receive process * @priv: driver private structure @@ -3751,8 +5035,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int count = 0, error = 0, len = 0; int status = 0, coe = priv->hw->rx_csum; unsigned int next_entry = rx_q->cur_rx; + enum dma_data_direction dma_dir; unsigned int desc_size; struct sk_buff *skb = NULL; + struct xdp_buff xdp; + int xdp_status = 0; + int buf_sz; + + dma_dir = page_pool_get_dma_dir(rx_q->page_pool); + buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; if (netif_msg_rx_status(priv)) { void *rx_head; @@ -3870,6 +5161,64 @@ read_again: } if (!skb) { + unsigned int pre_len, sync_len; + + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + + xdp.data = page_address(buf->page) + buf->page_offset; + xdp.data_end = xdp.data + buf1_len; + xdp.data_hard_start = page_address(buf->page); + xdp_set_data_meta_invalid(&xdp); + xdp.frame_sz = buf_sz; + xdp.rxq = &rx_q->xdp_rxq; + + pre_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + skb = stmmac_xdp_run_prog(priv, &xdp); + /* Due xdp_adjust_tail: DMA sync for_device + * cover max len CPU touch + */ + sync_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + sync_len = max(sync_len, pre_len); + + /* For Not XDP_PASS verdict */ + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & STMMAC_XDP_CONSUMED) { + page_pool_put_page(rx_q->page_pool, + virt_to_head_page(xdp.data), + sync_len, true); + buf->page = NULL; + priv->dev->stats.rx_dropped++; + + /* Clear skb as it was set as + * status by XDP program. 
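Because an XDP program may grow or shrink a frame with bpf_xdp_adjust_tail(), the RX path above records the data length both before and after running the program and later syncs the larger of the two regions when the page is recycled. A minimal model of that bookkeeping, with plain offsets standing in for the xdp_buff pointers:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
    return a > b ? a : b;
}

int main(void)
{
    /* Data length measured before and after the (hypothetical) program ran. */
    unsigned int pre_len  = 1500;   /* frame length as received           */
    unsigned int post_len = 1400;   /* program trimmed 100 bytes of tail  */

    /* Whatever region the CPU may have touched must be synced back to the
     * device before the page is reused, so take the maximum of the two.
     */
    unsigned int sync_len = max_u(post_len, pre_len);

    printf("pre=%u post=%u -> dma sync length %u\n", pre_len, post_len, sync_len);
    return 0;
}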
+ */ + skb = NULL; + + if (unlikely((status & rx_not_ls))) + goto read_again; + + count++; + continue; + } else if (xdp_res & (STMMAC_XDP_TX | + STMMAC_XDP_REDIRECT)) { + xdp_status |= xdp_res; + buf->page = NULL; + skb = NULL; + count++; + continue; + } + } + } + + if (!skb) { + /* XDP program may expand or reduce tail */ + buf1_len = xdp.data_end - xdp.data; + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); if (!skb) { priv->dev->stats.rx_dropped++; @@ -3877,10 +5226,8 @@ read_again: goto drain_data; } - dma_sync_single_for_cpu(priv->device, buf->addr, - buf1_len, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(buf->page), - buf1_len); + /* XDP program may adjust header */ + skb_copy_to_linear_data(skb, xdp.data, buf1_len); skb_put(skb, buf1_len); /* Data payload copied into SKB, page ready for recycle */ @@ -3888,9 +5235,9 @@ read_again: buf->page = NULL; } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, - buf1_len, DMA_FROM_DEVICE); + buf1_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf->page, 0, buf1_len, + buf->page, buf->page_offset, buf1_len, priv->dma_buf_sz); /* Data payload appended into SKB */ @@ -3900,7 +5247,7 @@ read_again: if (buf2_len) { dma_sync_single_for_cpu(priv->device, buf->sec_addr, - buf2_len, DMA_FROM_DEVICE); + buf2_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_buf_sz); @@ -3946,6 +5293,8 @@ drain_data: rx_q->state.len = len; } + stmmac_finalize_xdp_rx(priv, xdp_status); + stmmac_rx_refill(priv, queue); priv->xstats.rx_pkt_n += count; @@ -3985,7 +5334,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) priv->xstats.napi_poll++; - work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); + work_done = stmmac_tx_clean(priv, budget, chan); work_done = min(work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -3999,6 +5348,42 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) return work_done; } +static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rxtx_napi); + struct stmmac_priv *priv = ch->priv_data; + int rx_done, tx_done; + u32 chan = ch->index; + + priv->xstats.napi_poll++; + + tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = min(tx_done, budget); + + rx_done = stmmac_rx_zc(priv, budget, chan); + + /* If either TX or RX work is not complete, return budget + * and keep pooling + */ + if (tx_done >= budget || rx_done >= budget) + return budget; + + /* all work done, exit the polling mode */ + if (napi_complete_done(napi, rx_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + /* Both RX and TX work done are compelte, + * so enable both RX & TX IRQs. 
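The new combined poller above follows the usual NAPI contract: return the full budget while either direction still has work, otherwise complete and return at most budget - 1 so the instance is not rescheduled. A standalone sketch of that return-value rule:

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }

/* Combined RX/TX poll result: returning 'budget' keeps the instance
 * scheduled; anything smaller lets napi_complete_done() re-enable the
 * channel interrupt.
 */
static int rxtx_poll_result(int rx_done, int tx_done, int budget)
{
    if (rx_done >= budget || tx_done >= budget)
        return budget;                  /* keep polling */

    return min_i(rx_done, budget - 1);  /* done: re-arm the IRQ */
}

int main(void)
{
    printf("rx=64 tx=10 budget=64 -> %d\n", rxtx_poll_result(64, 10, 64));
    printf("rx=3  tx=70 budget=64 -> %d\n", rxtx_poll_result(3, 70, 64));
    printf("rx=5  tx=2  budget=64 -> %d\n", rxtx_poll_result(5, 2, 64));
    return 0;
}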
+ */ + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return min(rx_done, budget - 1); +} + /** * stmmac_tx_timeout * @dev : Pointer to net device structure @@ -4058,6 +5443,11 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) return -EBUSY; } + if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); + return -EINVAL; + } + new_mtu = STMMAC_ALIGN(new_mtu); /* If condition true, FIFO is too small or MTU too large */ @@ -4119,27 +5509,57 @@ static int stmmac_set_features(struct net_device *netdev, stmmac_rx_ipc(priv, priv->hw); sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); return 0; } -/** - * stmmac_interrupt - main ISR - * @irq: interrupt number. - * @dev_id: to pass the net device pointer (must be valid). - * Description: this is the main driver interrupt service routine. - * It can call: - * o DMA service routine (to manage incoming frame reception and transmission - * status) - * o Core interrupts to manage: remote wake-up, management counter, LPI - * interrupts. - */ -static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (status == FPE_EVENT_UNKNOWN || !*hs_enable) + return; + + /* If LP has sent verify mPacket, LP is FPE capable */ + if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { + if (*lp_state < FPE_STATE_CAPABLE) + *lp_state = FPE_STATE_CAPABLE; + + /* If user has requested FPE enable, quickly response */ + if (*hs_enable) + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_RESPONSE); + } + + /* If Local has sent verify mPacket, Local is FPE capable */ + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { + if (*lo_state < FPE_STATE_CAPABLE) + *lo_state = FPE_STATE_CAPABLE; + } + + /* If LP has sent response mPacket, LP is entering FPE ON */ + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) + *lp_state = FPE_STATE_ENTERING_ON; + + /* If Local has sent response mPacket, Local is entering FPE ON */ + if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) + *lo_state = FPE_STATE_ENTERING_ON; + + if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && + !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && + priv->fpe_wq) { + queue_work(priv->fpe_wq, &priv->fpe_task); + } +} + +static void stmmac_common_interrupt(struct stmmac_priv *priv) { - struct net_device *dev = (struct net_device *)dev_id; - struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queues_count; @@ -4152,12 +5572,16 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (priv->irq_wake) pm_wakeup_event(priv->device, 0); - /* Check if adapter is up */ - if (test_bit(STMMAC_DOWN, &priv->state)) - return IRQ_HANDLED; - /* Check if a fatal error happened */ - if (stmmac_safety_feat_interrupt(priv)) - return IRQ_HANDLED; + if (priv->dma_cap.estsel) + stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, + &priv->xstats, tx_cnt); + + if (priv->dma_cap.fpesel) { + int status = stmmac_fpe_irq_status(priv, priv->ioaddr, + priv->dev); + + stmmac_fpe_event_status(priv, 
status); + } /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || xmac) { @@ -4189,11 +5613,41 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) /* PCS link status */ if (priv->hw->pcs) { if (priv->xstats.pcs_link) - netif_carrier_on(dev); + netif_carrier_on(priv->dev); else - netif_carrier_off(dev); + netif_carrier_off(priv->dev); } + + stmmac_timestamp_interrupt(priv, priv); } +} + +/** + * stmmac_interrupt - main ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the main driver interrupt service routine. + * It can call: + * o DMA service routine (to manage incoming frame reception and transmission + * status) + * o Core interrupts to manage: remote wake-up, management counter, LPI + * interrupts. + */ +static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + if (stmmac_safety_feat_interrupt(priv)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); /* To handle DMA interrupts */ stmmac_dma_interrupt(priv); @@ -4201,15 +5655,136 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + if (unlikely(!dev)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + if (unlikely(!dev)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + stmmac_safety_feat_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; + int chan = tx_q->queue_index; + struct stmmac_priv *priv; + int status; + + priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); + + if (unlikely(!data)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + status = stmmac_napi_check(priv, chan, DMA_DIR_TX); + + if (unlikely(status & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && + tc <= 256) { + tc += 64; + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, + tc, + tc, + chan); + else + stmmac_set_dma_operation_mode(priv, + tc, + SF_DMA_MODE, + chan); + priv->xstats.threshold = tc; + } + } else if (unlikely(status == tx_hard_error)) { + stmmac_tx_err(priv, chan); + } + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) +{ + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; + 
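The per-queue TX interrupt handler above reuses the existing recovery heuristic: on a threshold-related DMA error the store-and-forward threshold is bumped in 64-byte steps up to a cap. A tiny model of that back-off; the step and cap are written out as illustrative constants:

#include <stdio.h>

#define TC_STEP  64
#define TC_CAP  256   /* illustrative cap, after which the threshold is left alone */

/* Bump the DMA threshold after a bump_tc-style error; returns the new value. */
static int bump_threshold(int tc)
{
    if (tc <= TC_CAP)
        tc += TC_STEP;
    return tc;
}

int main(void)
{
    int tc = 64;

    for (int err = 1; err <= 5; err++) {
        tc = bump_threshold(tc);
        printf("error %d: threshold now %d\n", err, tc);
    }
    return 0;
}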
int chan = rx_q->queue_index; + struct stmmac_priv *priv; + + priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); + + if (unlikely(!data)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + stmmac_napi_check(priv, chan, DMA_DIR_RX); + + return IRQ_HANDLED; +} + #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling receive - used by NETCONSOLE and other diagnostic tools * to allow network I/O with interrupts disabled. */ static void stmmac_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + /* If adapter is down, do nothing */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + if (priv->plat->multi_msi_en) { + for (i = 0; i < priv->plat->rx_queues_to_use; i++) + stmmac_msi_intr_rx(0, &priv->rx_queue[i]); + + for (i = 0; i < priv->plat->tx_queues_to_use; i++) + stmmac_msi_intr_tx(0, &priv->tx_queue[i]); + } else { + disable_irq(dev->irq); + stmmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); + } } #endif @@ -4258,7 +5833,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) return ret; - stmmac_disable_all_queues(priv); + __stmmac_disable_all_queues(priv); switch (type) { case TC_SETUP_CLSU32: @@ -4622,6 +6197,12 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid bool is_double = false; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; @@ -4655,10 +6236,222 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi if (priv->hw->num_vlan) { ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); if (ret) - return ret; + goto del_vlan_error; + } + + ret = stmmac_vlan_update(priv, is_double); + +del_vlan_error: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int i, nxmit = 0; + int queue; + + if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + nq->trans_start = jiffies; + + for (i = 0; i < num_frames; i++) { + int res; + + res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); + if (res == STMMAC_XDP_CONSUMED) + break; + + nxmit++; } - return stmmac_vlan_update(priv, is_double); + if (flags & XDP_XMIT_FLUSH) { + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + } + + __netif_tx_unlock(nq); + + return nxmit; +} + +void 
stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_rx_dma(priv, queue); + __free_dma_rx_desc_resources(priv, queue); +} + +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + u32 buf_size; + int ret; + + ret = __alloc_dma_rx_desc_resources(priv, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc RX desc.\n"); + return; + } + + ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); + if (ret) { + __free_dma_rx_desc_resources(priv, queue); + netdev_err(priv->dev, "Failed to init RX desc.\n"); + return; + } + + stmmac_clear_rx_descriptors(priv, queue); + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, rx_q->queue_index); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, rx_q->queue_index); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + rx_q->queue_index); + } + + stmmac_start_rx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_tx_dma(priv, queue); + __free_dma_tx_desc_resources(priv, queue); +} + +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + int ret; + + ret = __alloc_dma_tx_desc_resources(priv, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc TX desc.\n"); + return; + } + + ret = __init_dma_tx_desc_rings(priv, queue); + if (ret) { + __free_dma_tx_desc_resources(priv, queue); + netdev_err(priv->dev, "Failed to init TX desc.\n"); + return; + } + + stmmac_clear_tx_descriptors(priv, queue); + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, tx_q->queue_index); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, tx_q->queue_index); + + stmmac_start_tx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); +} + +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + struct stmmac_channel *ch; + + if (test_bit(STMMAC_DOWN, &priv->state) || + !netif_carrier_ok(priv->dev)) + return -ENETDOWN; + + if (!stmmac_xdp_is_enabled(priv)) + return 
-ENXIO; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + rx_q = &priv->rx_queue[queue]; + tx_q = &priv->tx_queue[queue]; + ch = &priv->channel[queue]; + + if (!rx_q->xsk_pool && !tx_q->xsk_pool) + return -ENXIO; + + if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { + /* EQoS does not have per-DMA channel SW interrupt, + * so we schedule RX Napi straight-away. + */ + if (likely(napi_schedule_prep(&ch->rxtx_napi))) + __napi_schedule(&ch->rxtx_napi); + } + + return 0; } static const struct net_device_ops stmmac_netdev_ops = { @@ -4679,6 +6472,9 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, + .ndo_bpf = stmmac_bpf, + .ndo_xdp_xmit = stmmac_xdp_xmit, + .ndo_xsk_wakeup = stmmac_xsk_wakeup, }; static void stmmac_reset_subtask(struct stmmac_priv *priv) @@ -4837,6 +6633,12 @@ static void stmmac_napi_add(struct net_device *dev) stmmac_napi_poll_tx, NAPI_POLL_WEIGHT); } + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_add(dev, &ch->rxtx_napi, + stmmac_napi_poll_rxtx, + NAPI_POLL_WEIGHT); + } } } @@ -4854,6 +6656,10 @@ static void stmmac_napi_del(struct net_device *dev) netif_napi_del(&ch->rx_napi); if (queue < priv->plat->tx_queues_to_use) netif_napi_del(&ch->tx_napi); + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_del(&ch->rxtx_napi); + } } } @@ -4895,6 +6701,68 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) return ret; } +#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" +static void stmmac_fpe_lp_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + fpe_task); + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + bool *enable = &fpe_cfg->enable; + int retries = 20; + + while (retries-- > 0) { + /* Bail out immediately if FPE handshake is OFF */ + if (*lo_state == FPE_STATE_OFF || !*hs_enable) + break; + + if (*lo_state == FPE_STATE_ENTERING_ON && + *lp_state == FPE_STATE_ENTERING_ON) { + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + *enable); + + netdev_info(priv->dev, "configured FPE\n"); + + *lo_state = FPE_STATE_ON; + *lp_state = FPE_STATE_ON; + netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); + break; + } + + if ((*lo_state == FPE_STATE_CAPABLE || + *lo_state == FPE_STATE_ENTERING_ON) && + *lp_state != FPE_STATE_ON) { + netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, + *lo_state, *lp_state); + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_VERIFY); + } + /* Sleep then retry */ + msleep(500); + } + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); +} + +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) +{ + if (priv->plat->fpe_cfg->hs_enable != enable) { + if (enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_VERIFY); + } else { + priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; + priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; + } + + priv->plat->fpe_cfg->hs_enable = enable; + } +} + /** * stmmac_dvr_probe * @device: device pointer @@ -4930,12 +6798,19 @@ int stmmac_dvr_probe(struct device *device, priv->plat = plat_dat; priv->ioaddr = res->addr; priv->dev->base_addr = (unsigned long)res->addr; + priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; priv->dev->irq = res->irq; priv->wol_irq = res->wol_irq; priv->lpi_irq = res->lpi_irq; - - if (!IS_ERR_OR_NULL(res->mac)) + priv->sfty_ce_irq = res->sfty_ce_irq; + priv->sfty_ue_irq = res->sfty_ue_irq; + for (i = 0; i < MTL_MAX_RX_QUEUES; i++) + priv->rx_irq[i] = res->rx_irq[i]; + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) + priv->tx_irq[i] = res->tx_irq[i]; + + if (!is_zero_ether_addr(res->mac)) memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); dev_set_drvdata(device, priv->dev); @@ -4943,6 +6818,10 @@ int stmmac_dvr_probe(struct device *device, /* Verify driver arguments */ stmmac_verify_args(); + priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); + if (!priv->af_xdp_zc_qps) + return -ENOMEM; + /* Allocate workqueue */ priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { @@ -4952,6 +6831,9 @@ int stmmac_dvr_probe(struct device *device, INIT_WORK(&priv->service_task, stmmac_service_task); + /* Initialize Link Partner FPE workqueue */ + INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); + /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ @@ -4973,6 +6855,11 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_hw_init; + /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. + */ + if (priv->synopsys_id < DWMAC_CORE_5_20) + priv->plat->dma_cfg->dche = false; + stmmac_check_ether_addr(priv); ndev->netdev_ops = &stmmac_netdev_ops; @@ -4995,7 +6882,8 @@ int stmmac_dvr_probe(struct device *device, if (priv->dma_cap.sphen) { ndev->hw_features |= NETIF_F_GRO; - priv->sph = true; + priv->sph_cap = true; + priv->sph = priv->sph_cap; dev_info(priv->device, "SPH feature enabled\n"); } @@ -5097,6 +6985,10 @@ int stmmac_dvr_probe(struct device *device, stmmac_check_pcs_mode(priv); + pm_runtime_get_noresume(device); + pm_runtime_set_active(device); + pm_runtime_enable(device); + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) { /* MDIO bus Registration */ @@ -5134,6 +7026,11 @@ int stmmac_dvr_probe(struct device *device, stmmac_init_fs(ndev); #endif + /* Let pm_runtime_put() disable the clocks. + * If CONFIG_PM is not enabled, the clocks will stay powered. 
+ */ + pm_runtime_put(device); + return ret; error_serdes_powerup: @@ -5148,6 +7045,8 @@ error_mdio_register: stmmac_napi_del(ndev); error_hw_init: destroy_workqueue(priv->wq); + stmmac_bus_clks_config(priv, false); + bitmap_free(priv->af_xdp_zc_qps); return ret; } @@ -5183,13 +7082,14 @@ int stmmac_dvr_remove(struct device *dev) phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); + pm_runtime_put(dev); + pm_runtime_disable(dev); if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); mutex_destroy(&priv->lock); + bitmap_free(priv->af_xdp_zc_qps); return 0; } @@ -5207,6 +7107,7 @@ int stmmac_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; + int ret; if (!ndev || !netif_running(ndev)) return 0; @@ -5250,11 +7151,24 @@ int stmmac_suspend(struct device *dev) pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable_unprepare(priv->plat->clk_ptp_ref); - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); + ret = pm_runtime_force_suspend(dev); + if (ret) { + mutex_unlock(&priv->lock); + return ret; + } } + mutex_unlock(&priv->lock); + if (priv->dma_cap.fpesel) { + /* Disable FPE */ + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, false); + + stmmac_fpe_handshake(priv, false); + } + priv->speed = SPEED_UNKNOWN; return 0; } @@ -5317,8 +7231,9 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk previously disabled */ - clk_prepare_enable(priv->plat->stmmac_clk); - clk_prepare_enable(priv->plat->pclk); + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; if (priv->plat->clk_ptp_ref) clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index d64116e0543e..b750074f8f9c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -15,6 +15,7 @@ #include <linux/iopoll.h> #include <linux/mii.h> #include <linux/of_mdio.h> +#include <linux/pm_runtime.h> #include <linux/phy.h> #include <linux/property.h> #include <linux/slab.h> @@ -87,21 +88,29 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) u32 tmp, addr, value = MII_XGMAC_BUSY; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } if (phyreg & MII_ADDR_C45) { phyreg &= ~MII_ADDR_C45; ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; value |= MII_XGMAC_SADDR; } @@ -112,8 +121,10 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) /* Wait until any existing MII operation is complete 
*/ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to read */ writel(addr, priv->ioaddr + mii_address); @@ -121,11 +132,18 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Read the data from the MII data register */ - return readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, @@ -138,21 +156,29 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, u32 addr, tmp, value = MII_XGMAC_BUSY; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } if (phyreg & MII_ADDR_C45) { phyreg &= ~MII_ADDR_C45; ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; value |= MII_XGMAC_SADDR; } @@ -164,16 +190,23 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to write */ writel(addr, priv->ioaddr + mii_address); writel(value, priv->ioaddr + mii_data); /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000); + ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } /** @@ -196,6 +229,12 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) int data = 0; u32 v; + data = pm_runtime_get_sync(priv->device); + if (data < 0) { + pm_runtime_put_noidle(priv->device); + return data; + } + value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; @@ -216,19 +255,26 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) } if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } writel(data, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } /* Read the data from the MII data register */ data = (int)readl(priv->ioaddr + mii_data) 
& MII_DATA_MASK; +err_disable_clks: + pm_runtime_put(priv->device); + return data; } @@ -247,10 +293,16 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; + int ret, data = phydata; u32 value = MII_BUSY; - int data = phydata; u32 v; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; @@ -275,16 +327,23 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to write */ writel(data, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000); + ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 272cb47af9f2..95e0e4d6f74d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -198,8 +198,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev, if (ret) return ret; - pci_enable_msi(pdev); - memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[i]; res.wol_irq = pdev->irq; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6dc9f10414e4..1e17a23d9118 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -394,7 +394,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np) * set some private fields that will be used by the main at runtime. 
*/ struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; @@ -406,12 +406,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) if (!plat) return ERR_PTR(-ENOMEM); - *mac = of_get_mac_address(np); - if (IS_ERR(*mac)) { - if (PTR_ERR(*mac) == -EPROBE_DEFER) - return ERR_CAST(*mac); + rc = of_get_mac_address(np, mac); + if (rc) { + if (rc == -EPROBE_DEFER) + return ERR_PTR(rc); - *mac = NULL; + eth_zero_addr(mac); } plat->phy_interface = device_get_phy_mode(&pdev->dev); @@ -627,7 +627,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, } #else struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { return ERR_PTR(-EINVAL); } @@ -704,7 +704,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); -#ifdef CONFIG_PM_SLEEP /** * stmmac_pltfr_suspend * @dev: device pointer @@ -712,7 +711,7 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); * call the main suspend function and then, if required, on some platform, it * can call an exit helper. */ -static int stmmac_pltfr_suspend(struct device *dev) +static int __maybe_unused stmmac_pltfr_suspend(struct device *dev) { int ret; struct net_device *ndev = dev_get_drvdata(dev); @@ -733,7 +732,7 @@ static int stmmac_pltfr_suspend(struct device *dev) * the main resume function, on some platforms, it can call own init helper * if required. */ -static int stmmac_pltfr_resume(struct device *dev) +static int __maybe_unused stmmac_pltfr_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -744,10 +743,29 @@ static int stmmac_pltfr_resume(struct device *dev) return stmmac_resume(dev); } -#endif /* CONFIG_PM_SLEEP */ -SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, - stmmac_pltfr_resume); +static int __maybe_unused stmmac_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_bus_clks_config(priv, false); + + return 0; +} + +static int __maybe_unused stmmac_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return stmmac_bus_clks_config(priv, true); +} + +const struct dev_pm_ops stmmac_pltfr_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) + SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) +}; EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 3a4663b7b460..3fff3f59d73d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -12,7 +12,7 @@ #include "stmmac.h" struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 0989e2bb6ee3..4e86cdf2bc9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -9,6 +9,7 @@ *******************************************************************************/ #include "stmmac.h" #include "stmmac_ptp.h" +#include "dwmac4.h" /** * stmmac_adjust_freq @@ -134,7 +135,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; struct stmmac_pps_cfg *cfg; + u32 intr_value, acr_value; int ret = -EOPNOTSUPP; unsigned long flags; @@ -158,6 +162,37 @@ static int stmmac_enable(struct ptp_clock_info *ptp, priv->systime_flags); spin_unlock_irqrestore(&priv->ptp_lock, flags); break; + case PTP_CLK_REQ_EXTTS: + priv->plat->ext_snapshot_en = on; + mutex_lock(&priv->aux_ts_lock); + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + if (on) { + /* Enable External snapshot trigger */ + acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSFC; + netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Enable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value |= GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + + } else { + netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Disable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value &= ~GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + } + writel(acr_value, ptpaddr + PTP_ACR); + mutex_unlock(&priv->aux_ts_lock); + ret = 0; + break; + default: break; } @@ -165,13 +200,43 @@ static int stmmac_enable(struct ptp_clock_info *ptp, return ret; } +/** + * stmmac_get_syncdevicetime + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * Description: Read device and system clock simultaneously and return the + * corrected clock values in ns. 
+ **/ +static int stmmac_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + + if (priv->plat->crosststamp) + return priv->plat->crosststamp(device, system, ctx); + else + return -EOPNOTSUPP; +} + +static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + + return get_device_system_crosststamp(stmmac_get_syncdevicetime, + priv, NULL, xtstamp); +} + /* structure describing a PTP hardware clock */ static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac ptp", .max_adj = 62500000, .n_alarm = 0, - .n_ext_ts = 0, + .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */ .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, @@ -180,6 +245,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .gettime64 = stmmac_get_time, .settime64 = stmmac_set_time, .enable = stmmac_enable, + .getcrosststamp = stmmac_getcrosststamp, }; /** @@ -192,6 +258,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv) { int i; + if (priv->plat->ptp_clk_freq_config) + priv->plat->ptp_clk_freq_config(priv); + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { if (i >= STMMAC_PPS_MAX) break; @@ -202,8 +271,10 @@ void stmmac_ptp_register(struct stmmac_priv *priv) stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; spin_lock_init(&priv->ptp_lock); + mutex_init(&priv->aux_ts_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, @@ -229,4 +300,6 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } + + mutex_destroy(&priv->aux_ts_lock); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 7abb1d47e7da..53172a439810 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -23,6 +23,9 @@ #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ #define PTP_TAR 0x18 /* Timestamp Addend Reg */ +#define PTP_ACR 0x40 /* Auxiliary Control Reg */ +#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ +#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ #define PTP_STNSUR_ADDSUB_SHIFT 31 #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ @@ -64,4 +67,25 @@ #define PTP_SSIR_SSINC_MASK 0xff #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 +/* Auxiliary Control defines */ +#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */ +#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */ +#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ +#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ +#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ +#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ +#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ +#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ +#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */ +#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */ 
+#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */ + +enum aux_snapshot { + AUX_SNAPSHOT0 = 0x10, + AUX_SNAPSHOT1 = 0x20, + AUX_SNAPSHOT2 = 0x40, + AUX_SNAPSHOT3 = 0x80, +}; + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 44bb133c3000..4e70efc45458 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -254,6 +254,16 @@ static int tc_init(struct stmmac_priv *priv) priv->flow_entries_max); } + if (!priv->plat->fpe_cfg) { + priv->plat->fpe_cfg = devm_kzalloc(priv->device, + sizeof(*priv->plat->fpe_cfg), + GFP_KERNEL); + if (!priv->plat->fpe_cfg) + return -ENOMEM; + } else { + memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg)); + } + /* Fail silently as we can still use remaining features, e.g. CBS */ if (!dma_cap->frpsel) return 0; @@ -297,6 +307,7 @@ static int tc_init(struct stmmac_priv *priv) dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", priv->tc_entries_max, priv->tc_off_max); + return 0; } @@ -598,6 +609,87 @@ static int tc_del_flow(struct stmmac_priv *priv, return ret; } +#define VLAN_PRIO_FULL_MASK (0x07) + +static int tc_add_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_vlan match; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_vlan(rule, &match); + + if (match.mask->vlan_priority) { + u32 prio; + + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + prio = BIT(match.key->vlan_priority); + stmmac_rx_queue_prio(priv, priv->hw, prio, tc); + } + + return 0; +} + +static int tc_del_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + stmmac_rx_queue_prio(priv, priv->hw, 0, tc); + + return 0; +} + +static int tc_add_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_add_flow(priv, cls); + if (!ret) + return ret; + + return tc_add_vlan_flow(priv, cls); +} + +static int tc_del_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_del_flow(priv, cls); + if (!ret) + return ret; + + return tc_del_vlan_flow(priv, cls); +} + static int tc_setup_cls(struct stmmac_priv *priv, struct flow_cls_offload *cls) { @@ -609,10 +701,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, switch (cls->command) { case FLOW_CLS_REPLACE: - ret = tc_add_flow(priv, cls); + ret = tc_add_flow_cls(priv, cls); break; case FLOW_CLS_DESTROY: - ret = tc_del_flow(priv, cls); + ret = tc_del_flow_cls(priv, cls); break; default: return -EOPNOTSUPP; @@ -748,13 +840,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv, if (fpe && !priv->dma_cap.fpesel) 
return -EOPNOTSUPP; - ret = stmmac_fpe_configure(priv, priv->ioaddr, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, fpe); - if (ret && fpe) { - netdev_err(priv->dev, "failed to enable Frame Preemption\n"); - return ret; - } + /* Actual FPE register configuration will be done after FPE handshake + * is successful. + */ + priv->plat->fpe_cfg->enable = fpe; ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); @@ -764,12 +853,29 @@ } netdev_info(priv->dev, "configured EST\n"); + + if (fpe) { + stmmac_fpe_handshake(priv, true); + netdev_info(priv->dev, "start FPE handshake\n"); + } + return 0; disable: priv->plat->est->enable = false; stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + + priv->plat->fpe_cfg->enable = false; + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false); + netdev_info(priv->dev, "disabled FPE\n"); + + stmmac_fpe_handshake(priv, false); + netdev_info(priv->dev, "stop FPE handshake\n"); + return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c new file mode 100644 index 000000000000..105821b53020 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Intel Corporation. */ + +#include <net/xdp_sock_drv.h> + +#include "stmmac.h" +#include "stmmac_xdp.h" + +static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, + struct xsk_buff_pool *pool, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + bool need_update; + u32 frame_size; + int err; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + /* XDP ZC does not span multiple frames, make sure XSK pool buffer + * size can at least store a Q-in-Q frame.
+ */ + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) + return -EOPNOTSUPP; + + err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR); + if (err) { + netdev_err(priv->dev, "Failed to map xsk pool\n"); + return err; + } + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + napi_disable(&ch->rx_napi); + napi_disable(&ch->tx_napi); + } + + set_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + napi_enable(&ch->rxtx_napi); + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + + err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); + if (err) + return err; + } + + return 0; +} + +static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + struct xsk_buff_pool *pool; + bool need_update; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + pool = xsk_get_pool_from_qid(priv->dev, queue); + if (!pool) + return -EINVAL; + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + synchronize_rcu(); + napi_disable(&ch->rxtx_napi); + } + + xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); + + clear_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + } + + return 0; +} + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue) +{ + return pool ? stmmac_xdp_enable_pool(priv, pool, queue) : + stmmac_xdp_disable_pool(priv, queue); +} + +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = priv->dev; + struct bpf_prog *old_prog; + bool need_update; + bool if_running; + + if_running = netif_running(dev); + + if (prog && dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + need_update = !!priv->xdp_prog != !!prog; + if (if_running && need_update) + stmmac_release(dev); + + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + /* Disable RX SPH for XDP operation */ + priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); + + if (if_running && need_update) + stmmac_open(dev); + + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h new file mode 100644 index 000000000000..896dc987d4ef --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. 
*/ + +#ifndef _STMMAC_XDP_H_ +#define _STMMAC_XDP_H_ + +#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM) +#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue); +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack); + +#endif /* _STMMAC_XDP_H_ */ diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9ff894ba8d3e..54f45d8c79a7 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1599,6 +1599,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp) cas_phy_write(cp, MII_BMCR, val); break; } + break; default: break; } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 58f142ee78a3..9790656cf970 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1674,8 +1674,8 @@ static void gem_init_phy(struct gem *gp) if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { int i; - /* Those delay sucks, the HW seem to love them though, I'll - * serisouly consider breaking some locks here to be able + /* Those delays sucks, the HW seems to love them though, I'll + * seriously consider breaking some locks here to be able * to schedule instead */ for (i = 0; i < 3; i++) { diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 638d7b03be4b..6a67b026df0b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1824,7 +1824,6 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) for_each_child_of_node(node, port_np) { struct am65_cpsw_port *port; - const void *mac_addr; u32 port_id; /* it is not a slave port node, continue */ @@ -1903,15 +1902,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) return ret; } - mac_addr = of_get_mac_address(port_np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(port->slave.mac_addr, mac_addr); - } else if (am65_cpsw_am654_get_efuse_macid(port_np, - port->port_id, - port->slave.mac_addr) || - !is_valid_ether_addr(port->slave.mac_addr)) { - random_ether_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + ret = of_get_mac_address(port_np, port->slave.mac_addr); + if (ret) { + am65_cpsw_am654_get_efuse_macid(port_np, + port->port_id, + port->slave.mac_addr); + if (!is_valid_ether_addr(port->slave.mac_addr)) { + random_ether_addr(port->slave.mac_addr); + dev_err(dev, "Use random MAC address\n"); + } } } of_node_put(node); diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c index d93ffd8a08b0..23cfb91e9c4d 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c +++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c @@ -385,7 +385,7 @@ static void am65_cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port_id); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port_id = HOST_PORT_NUM; @@ -401,7 +401,7 @@ static void am65_cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port_id); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, 
ETH_ALEN) == 0) port_id = HOST_PORT_NUM; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index fd966567464c..c0cd7de88316 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1123,25 +1123,23 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; struct xdp_frame *xdpf; - int i, drops = 0, port; + int i, nxmit = 0, port; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; for (i = 0; i < n; i++) { xdpf = frames[i]; - if (xdpf->len < CPSW_MIN_PACKET_SIZE) { - xdp_return_frame_rx_napi(xdpf); - drops++; - continue; - } + if (xdpf->len < CPSW_MIN_PACKET_SIZE) + break; port = priv->emac_port + cpsw->data.dual_emac; if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port)) - drops++; + break; + nxmit++; } - return n - drops; + return nxmit; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1298,7 +1296,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; - const void *mac_addr = NULL; int lenp; const __be32 *parp; @@ -1370,10 +1367,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } no_phy_slave: - mac_addr = of_get_mac_address(slave_node); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(slave_data->mac_addr, mac_addr); - } else { + ret = of_get_mac_address(slave_node, slave_data->mac_addr); + if (ret) { ret = ti_cm_get_macid(&pdev->dev, i, slave_data->mac_addr); if (ret) diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 58a64313ac00..69b7a4e0220a 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1093,24 +1093,22 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, { struct cpsw_priv *priv = netdev_priv(ndev); struct xdp_frame *xdpf; - int i, drops = 0; + int i, nxmit = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; for (i = 0; i < n; i++) { xdpf = frames[i]; - if (xdpf->len < CPSW_MIN_PACKET_SIZE) { - xdp_return_frame_rx_napi(xdpf); - drops++; - continue; - } + if (xdpf->len < CPSW_MIN_PACKET_SIZE) + break; if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port)) - drops++; + break; + nxmit++; } - return n - drops; + return nxmit; } static int cpsw_get_port_parent_id(struct net_device *ndev, @@ -1259,7 +1257,6 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) for_each_child_of_node(tmp_node, port_np) { struct cpsw_slave_data *slave_data; - const void *mac_addr; u32 port_id; ret = of_property_read_u32(port_np, "reg", &port_id); @@ -1318,10 +1315,8 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) goto err_node_put; } - mac_addr = of_get_mac_address(port_np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(slave_data->mac_addr, mac_addr); - } else { + ret = of_get_mac_address(port_np, slave_data->mac_addr); + if (ret) { ret = ti_cm_get_macid(dev, port_id - 1, slave_data->mac_addr); if (ret) diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index bb59e768915e..5862f0a4a975 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1305,19 +1305,15 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf), dma, xdpf->len, port); } else { - if (sizeof(*xmeta) > xdpf->headroom) { - xdp_return_frame_rx_napi(xdpf); + if (sizeof(*xmeta) > xdpf->headroom) return -EINVAL; - } 
ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf), xdpf->data, xdpf->len, port); } - if (ret) { + if (ret) priv->ndev->stats.tx_dropped++; - xdp_return_frame_rx_napi(xdpf); - } return ret; } @@ -1353,7 +1349,8 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, if (unlikely(!xdpf)) goto drop; - cpsw_xdp_tx_frame(priv, xdpf, page, port); + if (cpsw_xdp_tx_frame(priv, xdpf, page, port)) + xdp_return_frame_rx_napi(xdpf); break; case XDP_REDIRECT: if (xdp_do_redirect(ndev, xdp, prog)) diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c index a72bb570756f..05a64fb7a04f 100644 --- a/drivers/net/ethernet/ti/cpsw_switchdev.c +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -395,7 +395,7 @@ static void cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port = HOST_PORT_NUM; @@ -411,7 +411,7 @@ static void cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port = HOST_PORT_NUM; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index c7031e1960d4..f9417b44cae8 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; /* EMAC mac_status register */ #define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000) #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20) -#define EMAC_MACSTATUS_TXERRCH_MASK (0x7) +#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000) #define EMAC_MACSTATUS_TXERRCH_SHIFT (16) #define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000) #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12) -#define EMAC_MACSTATUS_RXERRCH_MASK (0x7) +#define EMAC_MACSTATUS_RXERRCH_MASK (0x700) #define EMAC_MACSTATUS_RXERRCH_SHIFT (8) /* EMAC RX register masks */ @@ -1687,7 +1687,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) const struct of_device_id *match; const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; - const u8 *mac_addr; if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) return dev_get_platdata(&pdev->dev); @@ -1699,11 +1698,8 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) np = pdev->dev.of_node; pdata->version = EMAC_VERSION_2; - if (!is_valid_ether_addr(pdata->mac_addr)) { - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(pdata->mac_addr, mac_addr); - } + if (!is_valid_ether_addr(pdata->mac_addr)) + of_get_mac_address(np, pdata->mac_addr); of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &pdata->ctrl_reg_offset); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index d7a144b4a09f..9030e619e543 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1966,7 +1966,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device, struct resource res; void __iomem *efuse = NULL; u32 efuse_mac = 0; - const void *mac_addr; u8 efuse_mac_addr[6]; u32 temp[2]; int ret = 0; @@ -2036,10 +2035,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, devm_iounmap(dev, 
efuse); devm_release_mem_region(dev, res.start, size); } else { - mac_addr = of_get_mac_address(node_interface); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - else + ret = of_get_mac_address(node_interface, ndev->dev_addr); + if (ret) eth_random_addr(ndev->dev_addr); } diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index d5a75ef7e3ca..226a76633e65 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -146,7 +146,8 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT * interrupt, as we poll for the completion of the read operation - * in spider_net_read_phy. Should take about 50 us */ + * in spider_net_read_phy. Should take about 50 us + */ do { readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD); } while (readvalue & SPIDER_NET_GPREXEC); @@ -387,7 +388,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, (~(SPIDER_NET_RXBUF_ALIGN - 1)); /* and we need to have it 128 byte aligned, therefore we allocate a - * bit more */ + * bit more + */ /* allocate an skb */ descr->skb = netdev_alloc_skb(card->netdev, bufsize + SPIDER_NET_RXBUF_ALIGN - 1); @@ -488,7 +490,8 @@ spider_net_refill_rx_chain(struct spider_net_card *card) /* one context doing the refill (and a second context seeing that * and omitting it) is ok. If called by NAPI, we'll be called again * as spider_net_decode_one_descr is called several times. If some - * interrupt calls us, the NAPI is about to clean up anyway. */ + * interrupt calls us, the NAPI is about to clean up anyway. + */ if (!spin_trylock_irqsave(&chain->lock, flags)) return; @@ -523,14 +526,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card) /* Put at least one buffer into the chain. if this fails, * we've got a problem. If not, spider_net_refill_rx_chain - * will do the rest at the end of this function. */ + * will do the rest at the end of this function. + */ if (spider_net_prepare_rx_descr(card, chain->head)) goto error; else chain->head = chain->head->next; /* This will allocate the rest of the rx buffers; - * if not, it's business as usual later on. */ + * if not, it's business as usual later on. + */ spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); return 0; @@ -706,7 +711,8 @@ spider_net_set_low_watermark(struct spider_net_card *card) int i; /* Measure the length of the queue. Measurement does not - * need to be precise -- does not need a lock. */ + * need to be precise -- does not need a lock. 
+ */ while (descr != card->tx_chain.head) { status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; if (status == SPIDER_NET_DESCR_NOT_IN_USE) @@ -786,7 +792,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) /* fallthrough, if we release the descriptors * brutally (then we don't care about - * SPIDER_NET_DESCR_CARDOWNED) */ + * SPIDER_NET_DESCR_CARDOWNED) + */ fallthrough; case SPIDER_NET_DESCR_RESPONSE_ERROR: @@ -948,7 +955,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, skb_put(skb, hwdescr->valid_size); /* the card seems to add 2 bytes of junk in front - * of the ethernet frame */ + * of the ethernet frame + */ #define SPIDER_MISALIGN 2 skb_pull(skb, SPIDER_MISALIGN); skb->protocol = eth_type_trans(skb, netdev); @@ -1382,7 +1390,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, /* PHY read operation completed */ /* we don't use semaphores, as we poll for the completion * of the read operation in spider_net_read_phy. Should take - * about 50 us */ + * about 50 us + */ show_error = 0; break; case SPIDER_NET_GPWFFINT: @@ -1450,7 +1459,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, { case SPIDER_NET_GTMFLLINT: /* TX RAM full may happen on a usual case. - * Logging is not needed. */ + * Logging is not needed. + */ show_error = 0; break; case SPIDER_NET_GRFDFLLINT: @@ -1694,7 +1704,8 @@ spider_net_enable_card(struct spider_net_card *card) { int i; /* the following array consists of (register),(value) pairs - * that are set in this function. A register of 0 ends the list */ + * that are set in this function. A register of 0 ends the list + */ u32 regs[][2] = { { SPIDER_NET_GRESUMINTNUM, 0 }, { SPIDER_NET_GREINTNUM, 0 }, @@ -1757,7 +1768,8 @@ spider_net_enable_card(struct spider_net_card *card) spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); /* set chain tail address for RX chains and - * enable DMA */ + * enable DMA + */ spider_net_enable_rxchtails(card); spider_net_enable_rxdmac(card); @@ -1995,7 +2007,8 @@ static void spider_net_link_phy(struct timer_list *t) case BCM54XX_UNKNOWN: /* copper, fiber with and without failed, - * retry from beginning */ + * retry from beginning + */ spider_net_setup_aneg(card); card->medium = BCM54XX_COPPER; break; @@ -2263,7 +2276,8 @@ spider_net_setup_netdev(struct spider_net_card *card) netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - * NETIF_F_HW_VLAN_CTAG_FILTER */ + * NETIF_F_HW_VLAN_CTAG_FILTER + */ /* MTU range: 64 - 2294 */ netdev->min_mtu = SPIDER_NET_MIN_MTU; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 7a6e5ff8e5d4..fedb2bf69261 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1914,7 +1914,8 @@ tc35815_set_multicast_list(struct net_device *dev) if (dev->flags & IFF_PROMISC) { /* With some (all?) 100MHalf HUB, controller will hang - * if we enabled promiscuous mode before linkup... */ + * if we enabled promiscuous mode before linkup... 
+ */ struct tc35815_local *lp = netdev_priv(dev); if (!lp->link) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index b65767f9e499..fecc4d7b00b0 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2525,7 +2525,7 @@ static int velocity_close(struct net_device *dev) * @skb: buffer to transmit * @dev: network device * - * Called by the networ layer to request a packet is queued to + * Called by the network layer to request a packet is queued to * the velocity. Returns zero on success. */ static netdev_tx_t velocity_xmit(struct sk_buff *skb, diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 2b4126d2427d..2b84848dc26a 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -423,8 +423,14 @@ static int w5100_spi_probe(struct spi_device *spi) const struct of_device_id *of_id; const struct w5100_ops *ops; kernel_ulong_t driver_data; + const void *mac = NULL; + u8 tmpmac[ETH_ALEN]; int priv_size; - const void *mac = of_get_mac_address(spi->dev.of_node); + int ret; + + ret = of_get_mac_address(spi->dev.of_node, tmpmac); + if (!ret) + mac = tmpmac; if (spi->dev.of_node) { of_id = of_match_device(w5100_of_match, &spi->dev); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index c0d181a7f83a..ec5db481c9cd 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1157,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, INIT_WORK(&priv->setrx_work, w5100_setrx_work); INIT_WORK(&priv->restart_work, w5100_restart_work); - if (!IS_ERR_OR_NULL(mac_addr)) + if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); else eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index c6eb7f2368aa..911b5ef9e680 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -18,12 +18,14 @@ if NET_VENDOR_XILINX config XILINX_EMACLITE tristate "Xilinx 10/100 Ethernet Lite support" + depends on HAS_IOMEM select PHYLIB help This driver supports the 10/100 Ethernet Lite from Xilinx. 
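The w5100, cpsw, netcp, davinci_emac and emaclite hunks in this series all switch from the old of_get_mac_address(), which returned a pointer or ERR_PTR, to the new variant that fills a caller-supplied buffer and returns an error code. A minimal sketch of the new calling convention follows; the foo_init_mac() helper is illustrative only and not part of the patch.

#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Illustrative helper: take the MAC from DT, else fall back to a random one. */
static int foo_init_mac(struct device_node *np, struct net_device *ndev)
{
	int ret;

	/* New API: writes ETH_ALEN bytes into ndev->dev_addr, returns 0 on success. */
	ret = of_get_mac_address(np, ndev->dev_addr);
	if (ret == -EPROBE_DEFER)
		return ret;		/* NVMEM cell not available yet */

	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);	/* no usable address in DT */

	return 0;
}

Callers that must defer (such as stmmac_probe_config_dt() above) propagate -EPROBE_DEFER; the other conversions simply fall back to a random or eFuse-provided address as the hunks above do.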
config XILINX_AXI_EMAC tristate "Xilinx 10/100/1000 AXI Ethernet support" + depends on HAS_IOMEM select PHYLINK help This driver supports the 10/100/1000 Ethernet from Xilinx for the @@ -31,6 +33,7 @@ config XILINX_AXI_EMAC config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" + depends on HAS_IOMEM select PHYLIB help This driver supports the Xilinx 10/100/1000 LocalLink TEMAC diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 030185301014..a1f5f07f4ca9 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) static int temac_init_mac_address(struct net_device *ndev, const void *address) { - ether_addr_copy(ndev->dev_addr, address); + memcpy(ndev->dev_addr, address, ETH_ALEN); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); temac_do_set_mac_address(ndev); @@ -1351,7 +1351,7 @@ static int temac_probe(struct platform_device *pdev) struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; struct temac_local *lp; struct net_device *ndev; - const void *addr; + u8 addr[ETH_ALEN]; __be32 *p; bool little_endian; int rc = 0; @@ -1542,8 +1542,8 @@ static int temac_probe(struct platform_device *pdev) if (temac_np) { /* Retrieve the MAC address */ - addr = of_get_mac_address(temac_np); - if (IS_ERR(addr)) { + rc = of_get_mac_address(temac_np, addr); + if (rc) { dev_err(&pdev->dev, "could not find MAC address\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index aca7f82f6791..5b4d153b1492 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -376,6 +376,8 @@ struct axidma_bd { struct sk_buff *skb; } __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT); +#define XAE_NUM_MISC_CLOCKS 3 + /** * struct axienet_local - axienet private per device data * @ndev: Pointer for net_device to which it will be attached. 
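The axienet probe changes below replace the single clk handle with a dedicated AXI4-Lite clock plus a bulk-managed set of optional clocks. A compact sketch of that pattern, using the clock names the driver requests; the foo_get_clks() wrapper itself is illustrative, not part of the patch.

#include <linux/clk.h>

/* Illustrative: one optional named clock plus a bulk set of optional clocks. */
static int foo_get_clks(struct device *dev, struct clk **axi_clk,
			struct clk_bulk_data misc[3])
{
	int ret;

	/* Optional clock: returns NULL (not an error) when it is absent. */
	*axi_clk = devm_clk_get_optional(dev, "s_axi_lite_clk");
	if (IS_ERR(*axi_clk))
		return PTR_ERR(*axi_clk);

	ret = clk_prepare_enable(*axi_clk);
	if (ret)
		return ret;

	misc[0].id = "axis_clk";
	misc[1].id = "ref_clk";
	misc[2].id = "mgt_clk";

	/* Missing bulk entries stay NULL and are skipped on enable. */
	ret = devm_clk_bulk_get_optional(dev, 3, misc);
	if (!ret)
		ret = clk_bulk_prepare_enable(3, misc);
	if (ret)
		clk_disable_unprepare(*axi_clk);

	return ret;
}

The probe and remove hunks below pair this with clk_bulk_disable_unprepare() and clk_disable_unprepare() on the error and teardown paths.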
@@ -385,7 +387,8 @@ struct axidma_bd { * @phylink_config: phylink configuration settings * @pcs_phy: Reference to PCS/PMA PHY if used * @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core - * @clk: Clock for AXI bus + * @axi_clk: AXI4-Lite bus clock + * @misc_clks: Misc ethernet clocks (AXI4-Stream, Ref, MGT clocks) * @mii_bus: Pointer to MII bus structure * @mii_clk_div: MII bus clock divider value * @regs_start: Resource start for axienet device addresses @@ -434,7 +437,8 @@ struct axienet_local { bool switch_x_sgmii; - struct clk *clk; + struct clk *axi_clk; + struct clk_bulk_data misc_clks[XAE_NUM_MISC_CLOCKS]; struct mii_bus *mii_bus; u8 mii_clk_div; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index f8f8654ea728..b508c9453f40 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1835,8 +1835,8 @@ static int axienet_probe(struct platform_device *pdev) struct device_node *np; struct axienet_local *lp; struct net_device *ndev; - const void *mac_addr; struct resource *ethres; + u8 mac_addr[ETH_ALEN]; int addr_width = 32; u32 value; @@ -1863,22 +1863,39 @@ static int axienet_probe(struct platform_device *pdev) lp->rx_bd_num = RX_BD_NUM_DEFAULT; lp->tx_bd_num = TX_BD_NUM_DEFAULT; - lp->clk = devm_clk_get_optional(&pdev->dev, NULL); - if (IS_ERR(lp->clk)) { - ret = PTR_ERR(lp->clk); + lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); + if (!lp->axi_clk) { + /* For backward compatibility, if named AXI clock is not present, + * treat the first clock specified as the AXI clock. + */ + lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); + } + if (IS_ERR(lp->axi_clk)) { + ret = PTR_ERR(lp->axi_clk); goto free_netdev; } - ret = clk_prepare_enable(lp->clk); + ret = clk_prepare_enable(lp->axi_clk); if (ret) { - dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret); + dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); goto free_netdev; } + lp->misc_clks[0].id = "axis_clk"; + lp->misc_clks[1].id = "ref_clk"; + lp->misc_clks[2].id = "mgt_clk"; + + ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); + if (ret) + goto cleanup_clk; + + ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); + if (ret) + goto cleanup_clk; + /* Map device registers */ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); lp->regs = devm_ioremap_resource(&pdev->dev, ethres); if (IS_ERR(lp->regs)) { - dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); ret = PTR_ERR(lp->regs); goto cleanup_clk; } @@ -2045,13 +2062,14 @@ static int axienet_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); /* Retrieve the MAC address */ - mac_addr = of_get_mac_address(pdev->dev.of_node); - if (IS_ERR(mac_addr)) { - dev_warn(&pdev->dev, "could not find MAC address property: %ld\n", - PTR_ERR(mac_addr)); - mac_addr = NULL; + ret = of_get_mac_address(pdev->dev.of_node, mac_addr); + if (!ret) { + axienet_set_mac_address(ndev, mac_addr); + } else { + dev_warn(&pdev->dev, "could not find MAC address property: %d\n", + ret); + axienet_set_mac_address(ndev, NULL); } - axienet_set_mac_address(ndev, mac_addr); lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; @@ -2109,7 +2127,8 @@ cleanup_mdio: of_node_put(lp->phy_node); cleanup_clk: - clk_disable_unprepare(lp->clk); + 
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); + clk_disable_unprepare(lp->axi_clk); free_netdev: free_netdev(ndev); @@ -2132,7 +2151,8 @@ static int axienet_remove(struct platform_device *pdev) axienet_mdio_teardown(lp); - clk_disable_unprepare(lp->clk); + clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); + clk_disable_unprepare(lp->axi_clk); of_node_put(lp->phy_node); lp->phy_node = NULL; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 9c014cee34b2..48f544f6c999 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -159,8 +159,8 @@ int axienet_mdio_enable(struct axienet_local *lp) lp->mii_clk_div = 0; - if (lp->clk) { - host_clock = clk_get_rate(lp->clk); + if (lp->axi_clk) { + host_clock = clk_get_rate(lp->axi_clk); } else { struct device_node *np1; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 007840d4a807..d9d58a7dabee 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1115,7 +1115,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev) struct net_device *ndev = NULL; struct net_local *lp = NULL; struct device *dev = &ofdev->dev; - const void *mac_address; int rc = 0; @@ -1157,12 +1156,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->next_rx_buf_to_use = 0x0; lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); - mac_address = of_get_mac_address(ofdev->dev.of_node); - if (!IS_ERR(mac_address)) { - /* Set the MAC address. */ - ether_addr_copy(ndev->dev_addr, mac_address); - } else { + rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + if (rc) { dev_warn(dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); } diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 3e337142b516..2049d76a0e68 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -798,8 +798,6 @@ xirc2ps_config(struct pcmcia_device * link) goto config_error; } port_found: - if (err) - goto config_error; /**************** * Now allocate an interrupt line. 
Note that this does not diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 7b83a6e5d894..468ffe3d1707 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -22,6 +22,7 @@ config IXP4XX_ETH tristate "Intel IXP4xx Ethernet support" depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR select PHYLIB + select OF_MDIO if OF select NET_PTP_CLASSIFY help Say Y here if you want to use built-in Ethernet ports diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 0152f1e70783..cb89323855d8 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -28,6 +28,7 @@ #include <linux/kernel.h> #include <linux/net_tstamp.h> #include <linux/of.h> +#include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_data/eth_ixp4xx.h> #include <linux/platform_device.h> @@ -165,7 +166,6 @@ struct eth_regs { }; struct port { - struct resource *mem_res; struct eth_regs __iomem *regs; struct npe *npe; struct net_device *netdev; @@ -250,6 +250,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) static DEFINE_SPINLOCK(mdio_lock); static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ static struct mii_bus *mdio_bus; +static struct device_node *mdio_bus_np; static int ports_open; static struct port *npe_port_tab[MAX_NPES]; static struct dma_pool *dma_pool; @@ -533,7 +534,8 @@ static int ixp4xx_mdio_register(struct eth_regs __iomem *regs) mdio_bus->write = &ixp4xx_mdio_write; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0"); - if ((err = mdiobus_register(mdio_bus))) + err = of_mdiobus_register(mdio_bus, mdio_bus_np); + if (err) mdiobus_free(mdio_bus); return err; } @@ -1085,7 +1087,7 @@ static int init_queues(struct port *port) int i; if (!ports_open) { - dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent, + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, POOL_ALLOC_SIZE, 32, 0); if (!dma_pool) return -ENOMEM; @@ -1358,19 +1360,118 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +#ifdef CONFIG_OF +static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct of_phandle_args queue_spec; + struct of_phandle_args npe_spec; + struct device_node *mdio_np; + struct eth_plat_info *plat; + int ret; + + plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return NULL; + + ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0, + &npe_spec); + if (ret) { + dev_err(dev, "no NPE engine specified\n"); + return NULL; + } + /* NPE ID 0x00, 0x10, 0x20... 
*/ + plat->npe = (npe_spec.args[0] << 4); + + /* Check if this device has an MDIO bus */ + mdio_np = of_get_child_by_name(np, "mdio"); + if (mdio_np) { + plat->has_mdio = true; + mdio_bus_np = mdio_np; + /* DO NOT put the mdio_np, it will be used */ + } + + /* Get the rx queue as a resource from queue manager */ + ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, + &queue_spec); + if (ret) { + dev_err(dev, "no rx queue phandle\n"); + return NULL; + } + plat->rxq = queue_spec.args[0]; + + /* Get the txready queue as resource from queue manager */ + ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0, + &queue_spec); + if (ret) { + dev_err(dev, "no txready queue phandle\n"); + return NULL; + } + plat->txreadyq = queue_spec.args[0]; + + return plat; +} +#else +static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) +{ + return NULL; +} +#endif + static int ixp4xx_eth_probe(struct platform_device *pdev) { - char phy_id[MII_BUS_ID_SIZE + 3]; struct phy_device *phydev = NULL; struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; struct eth_plat_info *plat; - resource_size_t regs_phys; struct net_device *ndev; struct resource *res; struct port *port; int err; - plat = dev_get_platdata(dev); + if (np) { + plat = ixp4xx_of_get_platdata(dev); + if (!plat) + return -ENODEV; + } else { + plat = dev_get_platdata(dev); + if (!plat) + return -ENODEV; + plat->npe = pdev->id; + switch (plat->npe) { + case IXP4XX_ETH_NPEA: + /* If the MDIO bus is not up yet, defer probe */ + break; + case IXP4XX_ETH_NPEB: + /* On all except IXP43x, NPE-B is used for the MDIO bus. + * If there is no NPE-B in the feature set, bail out, + * else we have the MDIO bus here. + */ + if (!cpu_is_ixp43x()) { + if (!(ixp4xx_read_feature_bits() & + IXP4XX_FEATURE_NPEB_ETH0)) + return -ENODEV; + /* Else register the MDIO bus on NPE-B */ + plat->has_mdio = true; + } + break; + case IXP4XX_ETH_NPEC: + /* IXP43x lacks NPE-B and uses NPE-C for the MDIO bus + * access, if there is no NPE-C, no bus, nothing works, + * so bail out. + */ + if (cpu_is_ixp43x()) { + if (!(ixp4xx_read_feature_bits() & + IXP4XX_FEATURE_NPEC_ETH)) + return -ENODEV; + /* Else register the MDIO bus on NPE-B */ + plat->has_mdio = true; + } + break; + default: + return -ENODEV; + } + } if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port)))) return -ENOMEM; @@ -1378,75 +1479,42 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, dev); port = netdev_priv(ndev); port->netdev = ndev; - port->id = pdev->id; + port->id = plat->npe; /* Get the port resource and remap */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; - regs_phys = res->start; port->regs = devm_ioremap_resource(dev, res); if (IS_ERR(port->regs)) return PTR_ERR(port->regs); - switch (port->id) { - case IXP4XX_ETH_NPEA: - /* If the MDIO bus is not up yet, defer probe */ - if (!mdio_bus) - return -EPROBE_DEFER; - break; - case IXP4XX_ETH_NPEB: - /* - * On all except IXP43x, NPE-B is used for the MDIO bus. - * If there is no NPE-B in the feature set, bail out, else - * register the MDIO bus. 
- */ - if (!cpu_is_ixp43x()) { - if (!(ixp4xx_read_feature_bits() & - IXP4XX_FEATURE_NPEB_ETH0)) - return -ENODEV; - /* Else register the MDIO bus on NPE-B */ - if ((err = ixp4xx_mdio_register(port->regs))) - return err; - } - if (!mdio_bus) - return -EPROBE_DEFER; - break; - case IXP4XX_ETH_NPEC: - /* - * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access, - * of there is no NPE-C, no bus, nothing works, so bail out. - */ - if (cpu_is_ixp43x()) { - if (!(ixp4xx_read_feature_bits() & - IXP4XX_FEATURE_NPEC_ETH)) - return -ENODEV; - /* Else register the MDIO bus on NPE-C */ - if ((err = ixp4xx_mdio_register(port->regs))) - return err; + /* Register the MDIO bus if we have it */ + if (plat->has_mdio) { + err = ixp4xx_mdio_register(port->regs); + if (err) { + dev_err(dev, "failed to register MDIO bus\n"); + return err; } - if (!mdio_bus) - return -EPROBE_DEFER; - break; - default: - return -ENODEV; } + /* If the instance with the MDIO bus has not yet appeared, + * defer probing until it gets probed. + */ + if (!mdio_bus) + return -EPROBE_DEFER; ndev->netdev_ops = &ixp4xx_netdev_ops; ndev->ethtool_ops = &ixp4xx_ethtool_ops; ndev->tx_queue_len = 100; + /* Inherit the DMA masks from the platform device */ + ndev->dev.dma_mask = dev->dma_mask; + ndev->dev.coherent_dma_mask = dev->coherent_dma_mask; netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT); if (!(port->npe = npe_request(NPE_ID(port->id)))) return -EIO; - port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name); - if (!port->mem_res) { - err = -EBUSY; - goto err_npe_rel; - } - port->plat = plat; npe_port_tab[NPE_ID(port->id)] = port; memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN); @@ -1459,12 +1527,24 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); udelay(50); - snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, - mdio_bus->id, plat->phy); - phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(phydev)) { - err = PTR_ERR(phydev); + if (np) { + phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link); + } else { + phydev = mdiobus_get_phy(mdio_bus, plat->phy); + if (IS_ERR(phydev)) { + err = PTR_ERR(phydev); + dev_err(dev, "could not connect phydev (%d)\n", err); + goto err_free_mem; + } + err = phy_connect_direct(ndev, phydev, ixp4xx_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) + goto err_free_mem; + + } + if (!phydev) { + err = -ENODEV; + dev_err(dev, "no phydev\n"); goto err_free_mem; } @@ -1482,8 +1562,6 @@ err_phy_dis: phy_disconnect(phydev); err_free_mem: npe_port_tab[NPE_ID(port->id)] = NULL; - release_resource(port->mem_res); -err_npe_rel: npe_release(port->npe); return err; } @@ -1499,12 +1577,21 @@ static int ixp4xx_eth_remove(struct platform_device *pdev) ixp4xx_mdio_remove(); npe_port_tab[NPE_ID(port->id)] = NULL; npe_release(port->npe); - release_resource(port->mem_res); return 0; } +static const struct of_device_id ixp4xx_eth_of_match[] = { + { + .compatible = "intel,ixp4xx-ethernet", + }, + { }, +}; + static struct platform_driver ixp4xx_eth_driver = { - .driver.name = DRV_NAME, + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(ixp4xx_eth_of_match), + }, .probe = ixp4xx_eth_probe, .remove = ixp4xx_eth_remove, }; diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig index f722079dfb6a..846bf41c2717 100644 --- a/drivers/net/fddi/Kconfig +++ b/drivers/net/fddi/Kconfig @@ -38,22 +38,6 @@ config DEFXX To compile this driver as a module, choose M here: the 
module will be called defxx. If unsure, say N. -config DEFXX_MMIO - bool - prompt "Use MMIO instead of PIO" if PCI || EISA - depends on DEFXX - default n if PCI || EISA - default y - help - This instructs the driver to use EISA or PCI memory-mapped I/O - (MMIO) as appropriate instead of programmed I/O ports (PIO). - Enabling this gives an improvement in processing time in parts - of the driver, but it may cause problems with EISA (DEFEA) - adapters. TURBOchannel does not have the concept of I/O ports, - so MMIO is always used for these (DEFTA) adapters. - - If unsure, say N. - config SKFP tristate "SysKonnect FDDI PCI support" depends on FDDI && PCI diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 077c68498f04..6d1e3f49a3d3 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -21,7 +21,7 @@ * LVS Lawrence V. Stefani <lstefani@yahoo.com> * * Maintainers: - * macro Maciej W. Rozycki <macro@linux-mips.org> + * macro Maciej W. Rozycki <macro@orcam.me.uk> * * Credits: * I'd like to thank Patricia Cross for helping me get started with @@ -197,6 +197,7 @@ * 23 Oct 2006 macro Big-endian host support. * 14 Dec 2006 macro TURBOchannel support. * 01 Jul 2014 macro Fixes for DMA on 64-bit hosts. + * 10 Mar 2021 macro Dynamic MMIO vs port I/O. */ /* Include files */ @@ -225,8 +226,8 @@ /* Version information string should be updated prior to each new release! */ #define DRV_NAME "defxx" -#define DRV_VERSION "v1.11" -#define DRV_RELDATE "2014/07/01" +#define DRV_VERSION "v1.12" +#define DRV_RELDATE "2021/03/10" static const char version[] = DRV_NAME ": " DRV_VERSION " " DRV_RELDATE @@ -253,10 +254,10 @@ static const char version[] = #define DFX_BUS_TC(dev) 0 #endif -#ifdef CONFIG_DEFXX_MMIO -#define DFX_MMIO 1 +#if defined(CONFIG_EISA) || defined(CONFIG_PCI) +#define dfx_use_mmio bp->mmio #else -#define DFX_MMIO 0 +#define dfx_use_mmio true #endif /* Define module-wide (static) routines */ @@ -374,8 +375,6 @@ static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) { struct device __maybe_unused *bdev = bp->bus_dev; - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_use_mmio) dfx_writel(bp, offset, data); @@ -398,8 +397,6 @@ static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) { struct device __maybe_unused *bdev = bp->bus_dev; - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_use_mmio) dfx_readl(bp, offset, data); @@ -421,7 +418,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * None * * Arguments: - * bdev - pointer to device information + * bp - pointer to board information * bar_start - pointer to store the start addresses * bar_len - pointer to store the lengths of the areas * @@ -431,13 +428,13 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * Side Effects: * None */ -static void dfx_get_bars(struct device *bdev, +static void dfx_get_bars(DFX_board_t *bp, resource_size_t *bar_start, resource_size_t *bar_len) { + struct device *bdev = bp->bus_dev; int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_bus_pci) { int num = dfx_use_mmio ? 
0 : 1; @@ -495,6 +492,13 @@ static const struct net_device_ops dfx_netdev_ops = { .ndo_set_mac_address = dfx_ctl_set_mac_address, }; +static void dfx_register_res_err(const char *print_name, bool mmio, + unsigned long start, unsigned long len) +{ + pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n", + print_name, mmio ? "MMIO" : "I/O", len, start); +} + /* * ================ * = dfx_register = @@ -528,8 +532,6 @@ static int dfx_register(struct device *bdev) static int version_disp; int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; const char *print_name = dev_name(bdev); struct net_device *dev; DFX_board_t *bp; /* board pointer */ @@ -567,46 +569,48 @@ static int dfx_register(struct device *bdev) bp->bus_dev = bdev; dev_set_drvdata(bdev, dev); - dfx_get_bars(bdev, bar_start, bar_len); - if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { - pr_err("%s: Cannot use MMIO, no address set, aborting\n", - print_name); - pr_err("%s: Run ECU and set adapter's MMIO location\n", - print_name); - pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" - "\n", print_name); - err = -ENXIO; - goto err_out; + bp->mmio = true; + + dfx_get_bars(bp, bar_start, bar_len); + if (bar_len[0] == 0 || + (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) { + bp->mmio = false; + dfx_get_bars(bp, bar_start, bar_len); } - if (dfx_use_mmio) + if (dfx_use_mmio) { region = request_mem_region(bar_start[0], bar_len[0], - print_name); - else - region = request_region(bar_start[0], bar_len[0], print_name); + bdev->driver->name); + if (!region && (dfx_bus_eisa || dfx_bus_pci)) { + bp->mmio = false; + dfx_get_bars(bp, bar_start, bar_len); + } + } + if (!dfx_use_mmio) + region = request_region(bar_start[0], bar_len[0], + bdev->driver->name); if (!region) { - pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " - "aborting\n", dfx_use_mmio ? 
"MMIO" : "I/O", print_name, - (long)bar_len[0], (long)bar_start[0]); + dfx_register_res_err(print_name, dfx_use_mmio, + bar_start[0], bar_len[0]); err = -EBUSY; goto err_out_disable; } if (bar_start[1] != 0) { - region = request_region(bar_start[1], bar_len[1], print_name); + region = request_region(bar_start[1], bar_len[1], + bdev->driver->name); if (!region) { - pr_err("%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", print_name, - (long)bar_len[1], (long)bar_start[1]); + dfx_register_res_err(print_name, 0, + bar_start[1], bar_len[1]); err = -EBUSY; goto err_out_csr_region; } } if (bar_start[2] != 0) { - region = request_region(bar_start[2], bar_len[2], print_name); + region = request_region(bar_start[2], bar_len[2], + bdev->driver->name); if (!region) { - pr_err("%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", print_name, - (long)bar_len[2], (long)bar_start[2]); + dfx_register_res_err(print_name, 0, + bar_start[2], bar_len[2]); err = -EBUSY; goto err_out_bh_region; } @@ -721,7 +725,6 @@ static void dfx_bus_init(struct net_device *dev) int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; u8 val; DBG_printk("In dfx_bus_init...\n"); @@ -1041,7 +1044,6 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; int alloc_size; /* total buffer size needed */ char *top_v, *curr_v; /* virtual addrs into memory block */ dma_addr_t top_p, curr_p; /* physical addrs into memory block */ @@ -3695,8 +3697,6 @@ static void dfx_unregister(struct device *bdev) struct net_device *dev = dev_get_drvdata(bdev); DFX_board_t *bp = netdev_priv(dev); int dfx_bus_pci = dev_is_pci(bdev); - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; resource_size_t bar_start[3] = {0}; /* pointers to ports */ resource_size_t bar_len[3] = {0}; /* resource lengths */ int alloc_size; /* total buffer size used */ @@ -3716,7 +3716,7 @@ static void dfx_unregister(struct device *bdev) dfx_bus_uninit(dev); - dfx_get_bars(bdev, bar_start, bar_len); + dfx_get_bars(bp, bar_start, bar_len); if (bar_start[2] != 0) release_region(bar_start[2], bar_len[2]); if (bar_start[1] != 0) @@ -3748,7 +3748,7 @@ static const struct pci_device_id dfx_pci_table[] = { MODULE_DEVICE_TABLE(pci, dfx_pci_table); static struct pci_driver dfx_pci_driver = { - .name = "defxx", + .name = DRV_NAME, .id_table = dfx_pci_table, .probe = dfx_pci_register, .remove = dfx_pci_unregister, @@ -3779,7 +3779,7 @@ MODULE_DEVICE_TABLE(eisa, dfx_eisa_table); static struct eisa_driver dfx_eisa_driver = { .id_table = dfx_eisa_table, .driver = { - .name = "defxx", + .name = DRV_NAME, .bus = &eisa_bus_type, .probe = dfx_dev_register, .remove = dfx_dev_unregister, @@ -3800,7 +3800,7 @@ MODULE_DEVICE_TABLE(tc, dfx_tc_table); static struct tc_driver dfx_tc_driver = { .id_table = dfx_tc_table, .driver = { - .name = "defxx", + .name = DRV_NAME, .bus = &tc_bus_type, .probe = dfx_dev_register, .remove = dfx_dev_unregister, diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h index 9d30fde2ef3c..8193713e12db 100644 --- a/drivers/net/fddi/defxx.h +++ b/drivers/net/fddi/defxx.h @@ -16,7 +16,7 @@ * LVS Lawrence V. Stefani <lstefani@yahoo.com> * * Maintainers: - * macro Maciej W. Rozycki <macro@linux-mips.org> + * macro Maciej W. 
Rozycki <macro@orcam.me.uk> * * Modification History: * Date Name Description @@ -27,6 +27,7 @@ * 04 Aug 2003 macro Converted to the DMA API. * 23 Oct 2006 macro Big-endian host support. * 14 Dec 2006 macro TURBOchannel support. + * 10 Mar 2021 macro Dynamic MMIO vs port I/O. */ #ifndef _DEFXX_H_ @@ -1776,6 +1777,8 @@ typedef struct DFX_board_tag int port; } base; /* base address */ struct device *bus_dev; + /* Whether to use MMIO or port I/O. */ + bool mmio; u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */ u32 req_ttrt; /* requested TTRT value (in 80ns units) */ u32 burst_size; /* adapter burst size (enumerated) */ diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index eaf85db53a5e..14f07050b6b1 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -60,7 +60,7 @@ static const char version[] = DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n"; -MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>"); +MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>"); MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h index 706fa619b703..3814a2ff64ae 100644 --- a/drivers/net/fddi/skfp/h/smc.h +++ b/drivers/net/fddi/skfp/h/smc.h @@ -228,7 +228,7 @@ struct s_phy { u_char timer1_exp ; u_char timer2_exp ; u_char pcm_pad1[1] ; - int cem_pst ; /* CEM privae state; used for dual homing */ + int cem_pst ; /* CEM private state; used for dual homing */ struct lem_counter lem ; #ifdef AMDPLC struct s_plc plc ; diff --git a/drivers/net/fddi/skfp/h/smt.h b/drivers/net/fddi/skfp/h/smt.h index a0dbc0f57a55..b19c7a99d32a 100644 --- a/drivers/net/fddi/skfp/h/smt.h +++ b/drivers/net/fddi/skfp/h/smt.h @@ -411,7 +411,7 @@ struct smt_p_reason { #define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */ #define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */ #define SMT_RDF_RANGE 0x8 /* out of range */ -#define SMT_RDF_AUTHOR 0x9 /* not autohorized */ +#define SMT_RDF_AUTHOR 0x9 /* not authorized */ #define SMT_RDF_LENGTH 0x0a /* length error */ #define SMT_RDF_TOOLONG 0x0b /* length error */ #define SMT_RDF_SBA 0x0d /* SBA denied */ @@ -450,7 +450,7 @@ struct smt_p_version { struct smt_p_0015 { struct smt_para para ; /* generic parameter header */ - u_int res_type ; /* recsource type */ + u_int res_type ; /* resource type */ } ; #define SYNC_BW 0x00000001L /* Synchronous Bandwidth */ @@ -489,7 +489,7 @@ struct smt_p_0017 { struct smt_p_0018 { struct smt_para para ; /* generic parameter header */ int sba_ov_req ; /* total sync bandwidth req for overhead*/ -} ; /* measuered in bytes per T_Neg */ +} ; /* measured in bytes per T_Neg */ /* * P19 : SBA Allocation Address @@ -562,7 +562,7 @@ struct smt_p_fsc { #define FSC_TYPE2 2 /* Special A/C indicator forwarding */ /* - * P00 21 : user defined authoriziation (see pmf.c) + * P00 21 : user defined authorization (see pmf.c) */ #define SMT_P_AUTHOR 0x0021 @@ -764,10 +764,8 @@ struct smt_sif_operation { struct smt_p_setcount setcount ; /* Set Count mandatory */ #endif /* must be last */ - struct smt_p_lem lem[1] ; /* phy lem status */ + struct smt_p_lem lem[]; /* phy lem status */ } ; -#define SIZEOF_SMT_SIF_OPERATION (sizeof(struct smt_sif_operation)- \ - sizeof(struct smt_p_lem)) /* * ECF : echo frame diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index 774a6e3b0a67..6b68a53f1b38 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -1063,9 
+1063,9 @@ static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest, #endif if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY, - SIZEOF_SMT_SIF_OPERATION+ports*sizeof(struct smt_p_lem)))) + struct_size(sif, lem, ports)))) return ; - sif = smtod(mb, struct smt_sif_operation *) ; + sif = smtod(mb, typeof(sif)); smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */ smt_fill_mac_status(smc,&sif->status) ; /* set mac status */ smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 42f31c681846..1ab94b5f9bbf 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -461,6 +461,7 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6, if (err < 0) return ERR_PTR(err); + udp_allow_gso(sock->sk); return sock; } @@ -891,7 +892,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + if (!pskb_inet_may_pull(skb)) return -EINVAL; sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); @@ -988,7 +989,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) + if (!pskb_inet_may_pull(skb)) return -EINVAL; sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 59ac04a610ad..442c520ab8f3 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -269,7 +269,7 @@ int rndis_filter_receive(struct net_device *ndev, int rndis_filter_set_device_mac(struct netvsc_device *ndev, const char *mac); -void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); +int netvsc_switch_datapath(struct net_device *nv_dev, bool vf); #define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF) @@ -1718,4 +1718,8 @@ struct rndis_message { #define TRANSPORT_INFO_IPV6_TCP 0x10 #define TRANSPORT_INFO_IPV6_UDP 0x20 +#define RETRY_US_LO 5000 +#define RETRY_US_HI 10000 +#define RETRY_MAX 2000 /* >10 sec */ + #endif /* _HYPERV_NET_H */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index c64cc7639c39..9d07c9ce4be2 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -31,12 +31,13 @@ * Switch the data path from the synthetic interface to the VF * interface. */ -void netvsc_switch_datapath(struct net_device *ndev, bool vf) +int netvsc_switch_datapath(struct net_device *ndev, bool vf) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev); struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; + int ret, retry = 0; /* Block sending traffic to VF if it's about to be gone */ if (!vf) @@ -51,15 +52,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) init_pkt->msg.v4_msg.active_dp.active_datapath = NVSP_DATAPATH_SYNTHETIC; +again: trace_nvsp_send(ndev, init_pkt); - vmbus_sendpacket(dev->channel, init_pkt, + ret = vmbus_sendpacket(dev->channel, init_pkt, sizeof(struct nvsp_message), - (unsigned long)init_pkt, - VM_PKT_DATA_INBAND, + (unsigned long)init_pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + + /* If failed to switch to/from VF, let data_path_is_vf stay false, + * so we use synthetic path to send data. 
+ */ + if (ret) { + if (ret != -EAGAIN) { + netdev_err(ndev, + "Unable to send sw datapath msg, err: %d\n", + ret); + return ret; + } + + if (retry++ < RETRY_MAX) { + usleep_range(RETRY_US_LO, RETRY_US_HI); + goto again; + } else { + netdev_err( + ndev, + "Retry failed to send sw datapath msg, err: %d\n", + ret); + return ret; + } + } + wait_for_completion(&nv_dev->channel_init_wait); net_device_ctx->data_path_is_vf = vf; + + return 0; } /* Worker to setup sub channels on initial setup @@ -1017,6 +1044,26 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, } /* RCU already held by caller */ +/* Batching/bouncing logic is designed to attempt to optimize + * performance. + * + * For small, non-LSO packets we copy the packet to a send buffer + * which is pre-registered with the Hyper-V side. This enables the + * hypervisor to avoid remapping the aperture to access the packet + * descriptor and data. + * + * If we already started using a buffer and the netdev is transmitting + * a burst of packets, keep on copying into the buffer until it is + * full or we are done collecting a burst. If there is an existing + * buffer with space for the RNDIS descriptor but not the packet, copy + * the RNDIS descriptor to the buffer, keeping the packet in place. + * + * If we do batching and send more than one packet using a single + * NetVSC message, free the SKBs of the packets copied, except for the + * last packet. This is done to streamline the handling of the case + * where the last packet only had the RNDIS descriptor copied to the + * send buffer, with the data pointers included in the NetVSC message. + */ int netvsc_send(struct net_device *ndev, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 15f262b70489..f682a5572d84 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -38,9 +38,6 @@ #include "hyperv_net.h" #define RING_SIZE_MIN 64 -#define RETRY_US_LO 5000 -#define RETRY_US_HI 10000 -#define RETRY_MAX 2000 /* >10 sec */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) @@ -1612,34 +1609,23 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { - memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + ethtool_sprintf(&p, netvsc_stats[i].name); - for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { - memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(vf_stats); i++) + ethtool_sprintf(&p, vf_stats[i].name); for (i = 0; i < nvdev->num_chn; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_xdp_drop", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i); } for_each_present_cpu(cpu) { - for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) { - sprintf(p, pcpu_stats[i].name, cpu); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) + 
ethtool_sprintf(&p, pcpu_stats[i].name, cpu); } break; @@ -2311,6 +2297,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) { struct device *parent = vf_netdev->dev.parent; struct net_device_context *ndev_ctx; + struct net_device *ndev; struct pci_dev *pdev; u32 serial; @@ -2333,8 +2320,17 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) if (!ndev_ctx->vf_alloc) continue; - if (ndev_ctx->vf_serial == serial) - return hv_get_drvdata(ndev_ctx->device_ctx); + if (ndev_ctx->vf_serial != serial) + continue; + + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (ndev->addr_len != vf_netdev->addr_len || + memcmp(ndev->perm_addr, vf_netdev->perm_addr, + ndev->addr_len) != 0) + continue; + + return ndev; + } netdev_notice(vf_netdev, @@ -2413,6 +2409,7 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) struct netvsc_device *netvsc_dev; struct net_device *ndev; bool vf_is_up = false; + int ret; if (event != NETDEV_GOING_DOWN) vf_is_up = netif_running(vf_netdev); @@ -2429,9 +2426,17 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) if (net_device_ctx->data_path_is_vf == vf_is_up) return NOTIFY_OK; - netvsc_switch_datapath(ndev, vf_is_up); - netdev_info(ndev, "Data path switched %s VF: %s\n", - vf_is_up ? "to" : "from", vf_netdev->name); + ret = netvsc_switch_datapath(ndev, vf_is_up); + + if (ret) { + netdev_err(ndev, + "Data path failed to switch %s VF: %s, err: %d\n", + vf_is_up ? "to" : "from", vf_netdev->name, ret); + return NOTIFY_DONE; + } else { + netdev_info(ndev, "Data path switched %s VF: %s\n", + vf_is_up ? "to" : "from", vf_netdev->name); + } return NOTIFY_OK; } diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index c0bf7d78276e..da9135231c07 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -268,7 +268,7 @@ static int hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy, struct netlink_callback *cb, int flags) { void *hdr; - int res = -EMSGSIZE; + int res; hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags, MAC802154_HWSIM_CMD_GET_RADIO); diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index b68f1289b89e..8f99cfa14680 100644 --- a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -1,6 +1,6 @@ config QCOM_IPA tristate "Qualcomm IPA support" - depends on 64BIT && NET && QCOM_SMEM + depends on NET && QCOM_SMEM depends on ARCH_QCOM || COMPILE_TEST depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) select QCOM_MDT_LOADER if ARCH_QCOM @@ -12,8 +12,7 @@ config QCOM_IPA that is capable of generic hardware handling of IP packets, including routing, filtering, and NAT. Currently the IPA driver supports only basic transport of network traffic - between the AP and modem, on the Qualcomm SDM845 and SC7180 - SoCs. + between the AP and modem. Note that if selected, the selection type must match that of QCOM_Q6V5_COMMON (Y or M). 
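The netvsc_get_strings() conversion a few hunks above replaces the open-coded sprintf()-plus-pointer-bump sequences with ethtool_sprintf(), which formats the stat name into the current ETH_GSTRING_LEN-sized slot and then advances the caller's cursor by one whole slot. What follows is only an illustrative userspace model of that behaviour, not kernel code: the helper name ethtool_sprintf and the 32-byte ETH_GSTRING_LEN slot size are taken from the kernel, while everything else (the _model suffix, the sample queue names, main()) is made up for the sketch.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32	/* fixed-size slot per ethtool string */

/* Userspace stand-in for the kernel's ethtool_sprintf(): format into the
 * current slot, then advance the cursor by a full slot no matter how many
 * characters were actually written.
 */
static void ethtool_sprintf_model(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char strings[5 * ETH_GSTRING_LEN];
	char *p = strings;
	unsigned int i = 0;

	memset(strings, 0, sizeof(strings));

	/* Mirrors one iteration of the per-queue loop in netvsc_get_strings() */
	ethtool_sprintf_model(&p, "tx_queue_%u_packets", i);
	ethtool_sprintf_model(&p, "tx_queue_%u_bytes", i);
	ethtool_sprintf_model(&p, "rx_queue_%u_packets", i);
	ethtool_sprintf_model(&p, "rx_queue_%u_bytes", i);
	ethtool_sprintf_model(&p, "rx_queue_%u_xdp_drop", i);

	for (i = 0; i < 5; i++)
		printf("%s\n", strings + i * ETH_GSTRING_LEN);

	return 0;
}

Because the cursor always moves by ETH_GSTRING_LEN, the resulting string table keeps the same layout as the old memcpy/sprintf code, which is why the hunk can drop the manual "p += ETH_GSTRING_LEN" bookkeeping without changing what ethtool sees.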
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile index afe5df1e6eee..1efe1a88104b 100644 --- a/drivers/net/ipa/Makefile +++ b/drivers/net/ipa/Makefile @@ -7,6 +7,8 @@ ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \ ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \ ipa_gsi.o ipa_smp2p.o ipa_uc.o \ ipa_endpoint.o ipa_cmd.o ipa_modem.o \ - ipa_qmi.o ipa_qmi_msg.o + ipa_resource.o ipa_qmi.o ipa_qmi_msg.o -ipa-y += ipa_data-sdm845.o ipa_data-sc7180.o +ipa-y += ipa_data-v3.5.1.o ipa_data-v4.2.o \ + ipa_data-v4.5.o ipa_data-v4.9.o \ + ipa_data-v4.11.o diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 390d3403386a..9f06663cef26 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -198,7 +198,7 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id)); } -/* Turn off all GSI interrupts initially */ +/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */ static void gsi_irq_setup(struct gsi *gsi) { /* Disable all interrupt types */ @@ -217,12 +217,6 @@ static void gsi_irq_setup(struct gsi *gsi) iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); } -/* Turn off all GSI interrupts when we're all done */ -static void gsi_irq_teardown(struct gsi *gsi) -{ - /* Nothing to do */ -} - /* Event ring commands are performed one at a time. Their completion * is signaled by the event ring control GSI interrupt type, which is * only enabled when we issue an event ring command. Only the event @@ -351,7 +345,7 @@ void *gsi_ring_virt(struct gsi_ring *ring, u32 index) /* Return the 32-bit DMA address associated with a ring index */ static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) { - return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE; + return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE; } /* Return the ring index of a 32-bit ring offset */ @@ -701,17 +695,16 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); - val = u32_encode_bits(size, EV_R_LENGTH_FMASK); + val = ev_r_length_encoded(gsi->version, size); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id)); /* The context 2 and 3 registers store the low-order and * high-order 32 bits of the address of the event ring, * respectively. 
*/ - val = evt_ring->ring.addr & GENMASK(31, 0); + val = lower_32_bits(evt_ring->ring.addr); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); - - val = evt_ring->ring.addr >> 32; + val = upper_32_bits(evt_ring->ring.addr); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); /* Enable interrupt moderation by setting the moderation delay */ @@ -787,7 +780,7 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel) } } -/* Program a channel for use */ +/* Program a channel for use; there is no gsi_channel_deprogram() */ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) { size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; @@ -802,24 +795,23 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) channel->tre_ring.index = 0; /* We program all channels as GPI type/protocol */ - val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK); + val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI); if (channel->toward_ipa) val |= CHTYPE_DIR_FMASK; val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); - val = u32_encode_bits(size, R_LENGTH_FMASK); + val = r_length_encoded(gsi->version, size); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id)); /* The context 2 and 3 registers store the low-order and * high-order 32 bits of the address of the channel ring, * respectively. */ - val = channel->tre_ring.addr & GENMASK(31, 0); + val = lower_32_bits(channel->tre_ring.addr); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); - - val = channel->tre_ring.addr >> 32; + val = upper_32_bits(channel->tre_ring.addr); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); /* Command channel gets low weighted round-robin priority */ @@ -829,14 +821,14 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ - /* We enable the doorbell engine for IPA v3.5.1 */ - if (gsi->version == IPA_VERSION_3_5_1 && doorbell) + /* No need to use the doorbell engine starting at IPA v4.0 */ + if (gsi->version < IPA_VERSION_4_0 && doorbell) val |= USE_DB_ENG_FMASK; /* v4.0 introduces an escape buffer for prefetch. We use it * on all but the AP command channel. */ - if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) { + if (gsi->version >= IPA_VERSION_4_0 && !channel->command) { /* If not otherwise set, prefetch buffers are used */ if (gsi->version < IPA_VERSION_4_5) val |= USE_ESCAPE_BUF_ONLY_FMASK; @@ -844,6 +836,9 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY, PREFETCH_MODE_FMASK); } + /* All channels set DB_IN_BYTES */ + if (gsi->version >= IPA_VERSION_4_9) + val |= DB_IN_BYTES; iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); @@ -873,11 +868,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* All done! */ } -static void gsi_channel_deprogram(struct gsi_channel *channel) -{ - /* Nothing to do */ -} - static int __gsi_channel_start(struct gsi_channel *channel, bool start) { struct gsi *gsi = channel->gsi; @@ -975,7 +965,7 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) gsi_channel_reset_command(channel); /* Due to a hardware quirk we may need to reset RX channels twice. 
*/ - if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa) + if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa) gsi_channel_reset_command(channel); gsi_channel_program(channel, doorbell); @@ -1337,10 +1327,9 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) int ret; ret = platform_get_irq_byname(pdev, "gsi"); - if (ret <= 0) { - dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); + if (ret <= 0) return ret ? : -EINVAL; - } + irq = ret; ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); @@ -1366,7 +1355,7 @@ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, u32 tre_index; /* Event xfer_ptr records the TRE it's associated with */ - tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0); + tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr)); tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); return gsi_channel_trans_mapped(channel, tre_index); @@ -1439,20 +1428,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) /* Initialize a ring, including allocating DMA memory for its entries */ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) { - size_t size = count * GSI_RING_ELEMENT_SIZE; + u32 size = count * GSI_RING_ELEMENT_SIZE; struct device *dev = gsi->dev; dma_addr_t addr; - /* Hardware requires a 2^n ring size, with alignment equal to size */ + /* Hardware requires a 2^n ring size, with alignment equal to size. + * The DMA address returned by dma_alloc_coherent() is guaranteed to + * be a power-of-2 number of pages, which satisfies the requirement. + */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); - if (ring->virt && addr % size) { - dma_free_coherent(dev, size, ring->virt, addr); - dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", - size); - return -EINVAL; /* Not a good error value, but distinct */ - } else if (!ring->virt) { + if (!ring->virt) return -ENOMEM; - } + ring->addr = addr; ring->count = count; @@ -1625,18 +1612,6 @@ static u32 gsi_event_bitmap_init(u32 evt_ring_max) return event_bitmap; } -/* Setup function for event rings */ -static void gsi_evt_ring_setup(struct gsi *gsi) -{ - /* Nothing to do */ -} - -/* Inverse of gsi_evt_ring_setup() */ -static void gsi_evt_ring_teardown(struct gsi *gsi) -{ - /* Nothing to do */ -} - /* Setup function for a single channel */ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) { @@ -1686,7 +1661,6 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) netif_napi_del(&channel->napi); - gsi_channel_deprogram(channel); gsi_channel_de_alloc_command(gsi, channel_id); gsi_evt_ring_reset_command(gsi, evt_ring_id); gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); @@ -1761,7 +1735,6 @@ static int gsi_channel_setup(struct gsi *gsi) u32 mask; int ret; - gsi_evt_ring_setup(gsi); gsi_irq_enable(gsi); mutex_lock(&gsi->mutex); @@ -1821,7 +1794,6 @@ err_unwind: mutex_unlock(&gsi->mutex); gsi_irq_disable(gsi); - gsi_evt_ring_teardown(gsi); return ret; } @@ -1850,7 +1822,6 @@ static void gsi_channel_teardown(struct gsi *gsi) mutex_unlock(&gsi->mutex); gsi_irq_disable(gsi); - gsi_evt_ring_teardown(gsi); } /* Setup function for GSI. 
GSI firmware must be loaded and initialized */ @@ -1858,7 +1829,6 @@ int gsi_setup(struct gsi *gsi) { struct device *dev = gsi->dev; u32 val; - int ret; /* Here is where we first touch the GSI hardware */ val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); @@ -1867,7 +1837,7 @@ int gsi_setup(struct gsi *gsi) return -EIO; } - gsi_irq_setup(gsi); + gsi_irq_setup(gsi); /* No matching teardown required */ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); @@ -1901,18 +1871,13 @@ int gsi_setup(struct gsi *gsi) /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); - ret = gsi_channel_setup(gsi); - if (ret) - gsi_irq_teardown(gsi); - - return ret; + return gsi_channel_setup(gsi); } /* Inverse of gsi_setup() */ void gsi_teardown(struct gsi *gsi) { gsi_channel_teardown(gsi); - gsi_irq_teardown(gsi); } /* Initialize a channel's event ring */ @@ -1954,7 +1919,7 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) gsi_evt_ring_id_free(gsi, evt_ring_id); } -/* Init function for event rings */ +/* Init function for event rings; there is no gsi_evt_ring_exit() */ static void gsi_evt_ring_init(struct gsi *gsi) { u32 evt_ring_id = 0; @@ -1966,12 +1931,6 @@ static void gsi_evt_ring_init(struct gsi *gsi) while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX); } -/* Inverse of gsi_evt_ring_init() */ -static void gsi_evt_ring_exit(struct gsi *gsi) -{ - /* Nothing to do */ -} - static bool gsi_channel_data_valid(struct gsi *gsi, const struct ipa_gsi_endpoint_data *data) { @@ -2116,7 +2075,7 @@ static int gsi_channel_init(struct gsi *gsi, u32 count, /* IPA v4.2 requires the AP to allocate channels for the modem */ modem_alloc = gsi->version == IPA_VERSION_4_2; - gsi_evt_ring_init(gsi); + gsi_evt_ring_init(gsi); /* No matching exit required */ /* The endpoint data array is indexed by endpoint name */ for (i = 0; i < count; i++) { @@ -2150,7 +2109,6 @@ err_unwind: } gsi_channel_exit_one(&gsi->channel[data->channel_id]); } - gsi_evt_ring_exit(gsi); return ret; } @@ -2164,8 +2122,6 @@ static void gsi_channel_exit(struct gsi *gsi) gsi_channel_exit_one(&gsi->channel[channel_id]); while (channel_id--); gsi->modem_channel_bitmap = 0; - - gsi_evt_ring_exit(gsi); } /* Init function for GSI. 
GSI hardware does not need to be "ready" */ diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index efc980f96109..d5996bdb20ef 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -16,8 +16,8 @@ #include "ipa_version.h" /* Maximum number of channels and event rings supported by the driver */ -#define GSI_CHANNEL_COUNT_MAX 17 -#define GSI_EVT_RING_COUNT_MAX 13 +#define GSI_CHANNEL_COUNT_MAX 23 +#define GSI_EVT_RING_COUNT_MAX 20 /* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */ #define GSI_TLV_MAX 64 diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index 1785c9d3344d..ea333a244cf5 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -14,7 +14,7 @@ struct gsi_trans; struct gsi_ring; struct gsi_channel; -#define GSI_RING_ELEMENT_SIZE 16 /* bytes */ +#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */ /* Return the entry that follows one provided in a transaction pool */ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element); @@ -100,7 +100,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel); /** * gsi_ring_virt() - Return virtual address for a ring entry * @ring: Ring whose address is to be translated - * @addr: Index (slot number) of entry + * @index: Index (slot number) of entry */ void *gsi_ring_virt(struct gsi_ring *ring, u32 index); diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index 1622d8cf8dea..b4ac0258d6e1 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -64,6 +64,21 @@ (0x0000c01c + 0x1000 * (ee)) /* All other register offsets are relative to gsi->virt */ + +/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */ +enum gsi_channel_type { + GSI_CHANNEL_TYPE_MHI = 0x0, + GSI_CHANNEL_TYPE_XHCI = 0x1, + GSI_CHANNEL_TYPE_GPI = 0x2, + GSI_CHANNEL_TYPE_XDCI = 0x3, + GSI_CHANNEL_TYPE_WDI2 = 0x4, + GSI_CHANNEL_TYPE_GCI = 0x5, + GSI_CHANNEL_TYPE_WDI3 = 0x6, + GSI_CHANNEL_TYPE_MHIP = 0x7, + GSI_CHANNEL_TYPE_AQC = 0x8, + GSI_CHANNEL_TYPE_11AD = 0x9, +}; + #define GSI_CH_C_CNTXT_0_OFFSET(ch) \ GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP) #define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \ @@ -78,19 +93,35 @@ #define CHSTATE_FMASK GENMASK(23, 20) #define ELEMENT_SIZE_FMASK GENMASK(31, 24) -/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */ -enum gsi_channel_type { - GSI_CHANNEL_TYPE_MHI = 0x0, - GSI_CHANNEL_TYPE_XHCI = 0x1, - GSI_CHANNEL_TYPE_GPI = 0x2, - GSI_CHANNEL_TYPE_XDCI = 0x3, -}; +/* Encoded value for CH_C_CNTXT_0 register channel protocol fields */ +static inline u32 +chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type) +{ + u32 val; + + val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK); + if (version < IPA_VERSION_4_5) + return val; + + /* Encode upper bit(s) as well */ + type >>= hweight32(CHTYPE_PROTOCOL_FMASK); + val |= u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK); + + return val; +} #define GSI_CH_C_CNTXT_1_OFFSET(ch) \ GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP) #define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \ (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch)) -#define R_LENGTH_FMASK GENMASK(15, 0) + +/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */ +static inline u32 r_length_encoded(enum ipa_version version, u32 length) +{ + if (version < IPA_VERSION_4_9) + return u32_encode_bits(length, GENMASK(15, 0)); + return u32_encode_bits(length, GENMASK(19, 0)); +} #define GSI_CH_C_CNTXT_2_OFFSET(ch) \ 
GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP) @@ -114,6 +145,9 @@ enum gsi_channel_type { /* The next two fields are present for IPA v4.5 and above */ #define PREFETCH_MODE_FMASK GENMASK(13, 10) #define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16) +/* The next field is present for IPA v4.9 and above */ +#define DB_IN_BYTES GENMASK(24, 24) + /** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */ enum gsi_prefetch_mode { GSI_USE_PREFETCH_BUFS = 0x0, @@ -146,19 +180,25 @@ enum gsi_prefetch_mode { GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP) #define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \ (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev)) +/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */ #define EV_CHTYPE_FMASK GENMASK(3, 0) #define EV_EE_FMASK GENMASK(7, 4) #define EV_EVCHID_FMASK GENMASK(15, 8) #define EV_INTYPE_FMASK GENMASK(16, 16) #define EV_CHSTATE_FMASK GENMASK(23, 20) #define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24) -/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */ #define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \ GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP) #define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \ (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev)) -#define EV_R_LENGTH_FMASK GENMASK(15, 0) +/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */ +static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length) +{ + if (version < IPA_VERSION_4_9) + return u32_encode_bits(length, GENMASK(15, 0)); + return u32_encode_bits(length, GENMASK(19, 0)); +} #define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \ GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP) @@ -248,6 +288,7 @@ enum gsi_ch_cmd_opcode { GSI_CH_STOP = 0x2, GSI_CH_RESET = 0x9, GSI_CH_DE_ALLOC = 0xa, + GSI_CH_DB_STOP = 0xb, }; #define GSI_EV_CH_CMD_OFFSET \ @@ -278,6 +319,7 @@ enum gsi_generic_cmd_opcode { GSI_GENERIC_ALLOCATE_CHANNEL = 0x2, }; +/* The next register is present for IPA v3.5.1 and above */ #define GSI_GSI_HW_PARAM_2_OFFSET \ GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP) #define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \ @@ -300,7 +342,7 @@ enum gsi_generic_cmd_opcode { enum gsi_iram_size { IRAM_SIZE_ONE_KB = 0x0, IRAM_SIZE_TWO_KB = 0x1, -/* The next two values are available for IPA v4.0 and above */ + /* The next two values are available for IPA v4.0 and above */ IRAM_SIZE_TWO_N_HALF_KB = 0x2, IRAM_SIZE_THREE_KB = 0x3, /* The next two values are available for IPA v4.5 and above */ @@ -424,6 +466,8 @@ enum gsi_general_id { GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP) #define GSI_EE_N_ERROR_LOG_OFFSET(ee) \ (0x0001f200 + 0x4000 * (ee)) + +/* Fields below are present for IPA v3.5.1 and above */ #define ERR_ARG3_FMASK GENMASK(3, 0) #define ERR_ARG2_FMASK GENMASK(7, 4) #define ERR_ARG1_FMASK GENMASK(11, 8) @@ -474,7 +518,4 @@ enum gsi_generic_ee_result { GENERIC_EE_NO_RESOURCES = 0x7, }; -#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */ -#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24) - #endif /* _GSI_REG_H_ */ diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 6c3ed5b17b80..8c795a6a8598 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -91,7 +91,7 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, void *virt; #ifdef IPA_VALIDATE - if (!size || size % 8) + if (!size) return -EINVAL; if (count < max_alloc) return -EINVAL; @@ -141,7 +141,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, void *virt; #ifdef IPA_VALIDATE - if (!size || size % 8) + if (!size) 
return -EINVAL; if (count < max_alloc) return -EINVAL; @@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, size = __roundup_pow_of_two(size); total_size = (count + max_alloc - 1) * size; - /* The allocator will give us a power-of-2 number of pages. But we - * can't guarantee that, so request it. That way we won't waste any - * memory that would be available beyond the required space. - * - * Note that gsi_trans_pool_exit_dma() assumes the total allocated + /* The allocator will give us a power-of-2 number of pages + * sufficient to satisfy our request. Round up our requested + * size to avoid any unused space in the allocation. This way + * gsi_trans_pool_exit_dma() can assume the total allocated * size is exactly (count * size). */ total_size = get_order(total_size) << PAGE_SHIFT; diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index 3a4ab8a94d82..17fd1822d8a9 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -71,7 +71,7 @@ struct gsi_trans { /** * gsi_trans_pool_init() - Initialize a pool of structures for transactions - * @gsi: GSI pointer + * @pool: GSI transaction poll pointer * @size: Size of elements in the pool * @count: Minimum number of elements in the pool * @max_alloc: Maximum number of elements allocated at a time from pool @@ -123,7 +123,8 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr); /** - * gsi_trans_pool_exit() - Inverse of gsi_trans_pool_init() + * gsi_trans_pool_exit_dma() - Inverse of gsi_trans_pool_init_dma() + * @dev: Device used for DMA * @pool: Pool pointer */ void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool); diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 802077631371..e7ff376cb5b7 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -44,6 +44,8 @@ enum ipa_flag { * @version: IPA hardware version * @pdev: Platform device * @completion: Used to signal pipeline clear transfer complete + * @nb: Notifier block used for remoteproc SSR + * @notifier: Remoteproc SSR notifier * @smp2p: SMP2P information * @clock: IPA clocking information * @table_addr: DMA address of filter/route table content @@ -58,13 +60,12 @@ enum ipa_flag { * @mem_size: Total size (bytes) of memory at @mem_virt * @mem: Array of IPA-local memory region descriptors * @imem_iova: I/O virtual address of IPA region in IMEM - * @imem_size; Size of IMEM region + * @imem_size: Size of IMEM region * @smem_iova: I/O virtual address of IPA region in SMEM - * @smem_size; Size of SMEM region + * @smem_size: Size of SMEM region * @zero_addr: DMA address of preallocated zero-filled memory * @zero_virt: Virtual address of preallocated zero-filled memory * @zero_size: Size (bytes) of preallocated zero-filled memory - * @wakeup_source: Wakeup source information * @available: Bit mask indicating endpoints hardware supports * @filter_map: Bit mask indicating endpoints that support filtering * @initialized: Bit mask indicating endpoints initialized diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index d73b03a80ef8..525cdf28d9ea 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -71,13 +71,12 @@ struct ipa_cmd_hw_hdr_init_local { /* IPA_CMD_REGISTER_WRITE */ -/* For IPA v4.0+, this opcode gets modified with pipeline clear options */ - +/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */ #define 
REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8) #define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9) struct ipa_cmd_register_write { - __le16 flags; /* Unused/reserved for IPA v3.5.1 */ + __le16 flags; /* Unused/reserved prior to IPA v4.0 */ __le16 offset; __le32 value; __le32 value_mask; @@ -85,12 +84,12 @@ struct ipa_cmd_register_write { }; /* Field masks for ipa_cmd_register_write structure fields */ -/* The next field is present for IPA v4.0 and above */ +/* The next field is present for IPA v4.0+ */ #define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11) -/* The next field is present for IPA v3.5.1 only */ +/* The next field is not present for IPA v4.0+ */ #define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15) -/* The next field and its values are present for IPA v3.5.1 only */ +/* The next field and its values are not present for IPA v4.0+ */ #define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0) /* IPA_CMD_IP_PACKET_INIT */ @@ -123,7 +122,7 @@ struct ipa_cmd_hw_dma_mem_mem { /* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */ #define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0) -/* The next two fields are present for IPA v3.5.1 only. */ +/* The next two fields are not present for IPA v4.0+ */ #define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1) #define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2) @@ -154,7 +153,7 @@ static void ipa_cmd_validate_build(void) * of entries, as and IPv4 and IPv6 route tables have the same number * of entries. */ -#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE) +#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64)) #define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX) BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK)); BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); @@ -253,11 +252,12 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa, u32 bit_count; /* The maximum offset in a register_write immediate command depends - * on the version of IPA. IPA v3.5.1 supports a 16 bit offset, but - * newer versions allow some additional high-order bits. + * on the version of IPA. A 16 bit offset is always supported, + * but starting with IPA v4.0 some additional high-order bits are + * allowed. */ bit_count = BITS_PER_BYTE * sizeof(payload->offset); - if (ipa->version != IPA_VERSION_3_5_1) + if (ipa->version >= IPA_VERSION_4_0) bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK); BUILD_BUG_ON(bit_count > 32); offset_max = ~0U >> (32 - bit_count); @@ -456,7 +456,11 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value, /* pipeline_clear_src_grp is not used */ clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps; - if (ipa->version != IPA_VERSION_3_5_1) { + /* IPA v4.0+ represents the pipeline clear options in the opcode. It + * also supports a larger offset by encoding additional high-order + * bits in the payload flags field. + */ + if (ipa->version >= IPA_VERSION_4_0) { u16 offset_high; u32 val; diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 6dd3d35cf315..b99262281f41 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -20,11 +20,18 @@ struct gsi_channel; /** * enum ipa_cmd_opcode: IPA immediate commands * - * All immediate commands are issued using the AP command TX endpoint. - * The numeric values here are the opcodes for IPA v3.5.1 hardware. 
+ * @IPA_CMD_IP_V4_FILTER_INIT: Initialize IPv4 filter table + * @IPA_CMD_IP_V6_FILTER_INIT: Initialize IPv6 filter table + * @IPA_CMD_IP_V4_ROUTING_INIT: Initialize IPv4 routing table + * @IPA_CMD_IP_V6_ROUTING_INIT: Initialize IPv6 routing table + * @IPA_CMD_HDR_INIT_LOCAL: Initialize IPA-local header memory + * @IPA_CMD_REGISTER_WRITE: Register write performed by IPA + * @IPA_CMD_IP_PACKET_INIT: Set up next packet's destination endpoint + * @IPA_CMD_DMA_SHARED_MEM: DMA command performed by IPA + * @IPA_CMD_IP_PACKET_TAG_STATUS: Have next packet generate tag * status + * @IPA_CMD_NONE: Special (invalid) "not a command" value * - * IPA_CMD_NONE is a special (invalid) value that's used to indicate - * a request is *not* an immediate command. + * All immediate commands are issued using the AP command TX endpoint. */ enum ipa_cmd_opcode { IPA_CMD_NONE = 0x0, @@ -96,7 +103,7 @@ static inline bool ipa_cmd_data_valid(struct ipa *ipa) * * Return: 0 if successful, or a negative error code */ -int ipa_cmd_pool_init(struct gsi_channel *gsi_channel, u32 tre_count); +int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_count); /** * ipa_cmd_pool_exit() - Inverse of ipa_cmd_pool_init() @@ -124,7 +131,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans, enum ipa_cmd_opcode opcode, /** * ipa_cmd_hdr_init_local_add() - Add a header init command to a transaction - * @ipa: IPA structure + * @trans: GSI transaction * @offset: Offset of header memory in IPA local space * @size: Size of header memory * @addr: DMA address of buffer to be written from diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-v3.5.1.c index 88c9c3562ab7..ead1a82f32f5 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-v3.5.1.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2021 Linaro Ltd. */ #include <linux/log2.h> @@ -11,7 +11,49 @@ #include "ipa_endpoint.h" #include "ipa_mem.h" -/* Endpoint configuration for the SDM845 SoC. 
*/ +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v3.5.1 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_LWA_DL = 0, + IPA_RSRC_GROUP_SRC_UL_DL, + IPA_RSRC_GROUP_SRC_MHI_DMA, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_LWA_DL = 0, + IPA_RSRC_GROUP_DST_UL_DL_DPL, + IPA_RSRC_GROUP_DST_UNUSED_2, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 8, + }, + [IPA_QSB_MASTER_PCIE] = { + .max_writes = 4, + .max_reads = 12, + }, +}; + +/* Endpoint configuration data for an SoC having IPA v3.5.1 */ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { [IPA_ENDPOINT_AP_COMMAND_TX] = { .ee_id = GSI_EE_AP, @@ -24,11 +66,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 20, }, .endpoint = { - .seq_type = IPA_SEQ_DMA_ONLY, .config = { - .resource_group = 1, + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, .dma_mode = true, .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, }, }, }, @@ -43,9 +87,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 8, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { - .resource_group = 1, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, .aggregation = true, .status_enable = true, .rx = { @@ -66,14 +109,14 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .filter_support = true, - .seq_type = - IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, .config = { - .resource_group = 1, + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, .checksum = true, .qmap = true, .status_enable = true, .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .seq_rep_type = IPA_SEQ_REP_DMA_PARSER, .status_endpoint = IPA_ENDPOINT_MODEM_AP_RX, }, @@ -91,9 +134,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 8, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { - .resource_group = 1, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, .checksum = true, .qmap = true, .aggregation = true, @@ -103,12 +145,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, }, }, - [IPA_ENDPOINT_MODEM_COMMAND_TX] = { - .ee_id = GSI_EE_MODEM, - .channel_id = 1, - .endpoint_id = 4, - .toward_ipa = true, - }, [IPA_ENDPOINT_MODEM_LAN_TX] = { .ee_id = GSI_EE_MODEM, .channel_id = 0, @@ -118,12 +154,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, }, }, - [IPA_ENDPOINT_MODEM_LAN_RX] = { - .ee_id = GSI_EE_MODEM, - .channel_id = 3, - .endpoint_id = 13, - .toward_ipa = false, - }, [IPA_ENDPOINT_MODEM_AP_TX] = { .ee_id = GSI_EE_MODEM, .channel_id = 4, @@ -141,102 +171,105 @@ static const struct ipa_gsi_endpoint_data
ipa_gsi_endpoint_data[] = { }, }; -/* For the SDM845, resource groups are allocated this way: - * group 0: LWA_DL - * group 1: UL_DL - */ -static const struct ipa_resource_src ipa_resource_src[] = { - { - .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS, - .limits[0] = { - .min = 1, - .max = 255, +/* Source resource configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 1, .max = 255, }, - .limits[1] = { - .min = 1, - .max = 255, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 255, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 63, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, - .limits[0] = { - .min = 10, - .max = 10, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 10, .max = 10, }, - .limits[1] = { - .min = 10, - .max = 10, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 10, .max = 10, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, - .limits[0] = { - .min = 12, - .max = 12, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 12, .max = 12, }, - .limits[1] = { - .min = 14, - .max = 14, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 14, .max = 14, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS, - .limits[0] = { - .min = 0, - .max = 63, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 0, .max = 63, }, - .limits[1] = { - .min = 0, - .max = 63, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_MHI_DMA] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 63, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, - .limits[0] = { - .min = 14, - .max = 14, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { + .min = 14, .max = 14, }, - .limits[1] = { - .min = 20, - .max = 20, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 20, .max = 20, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 14, .max = 14, }, }, }; -static const struct ipa_resource_dst ipa_resource_dst[] = { - { - .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS, - .limits[0] = { - .min = 4, - .max = 4, +/* Destination resource configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_LWA_DL] = { + .min = 4, .max = 4, }, .limits[1] = { - .min = 4, - .max = 4, + .min = 4, .max = 4, }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 3, .max = 3, + } }, - { - .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS, - .limits[0] = { - .min = 2, - .max = 63, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_LWA_DL] = { + .min = 2, .max = 63, }, - .limits[1] = { - .min = 1, - .max = 63, + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 1, .max = 63, }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 1, .max = 2, + } }, }; -/* Resource configuration for the SDM845 SoC. 
*/ +/* Resource configuration data for an SoC having IPA v3.5.1 */ static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, .resource_src_count = ARRAY_SIZE(ipa_resource_src), .resource_src = ipa_resource_src, .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), .resource_dst = ipa_resource_dst, }; -/* IPA-resident memory region configuration for the SDM845 SoC. */ +/* IPA-resident memory region data for an SoC having IPA v3.5.1 */ static const struct ipa_mem ipa_mem_local_data[] = { [IPA_MEM_UC_SHARED] = { .offset = 0x0000, @@ -293,11 +326,6 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0140, .canary_count = 2, }, - [IPA_MEM_AP_HEADER] = { - .offset = 0x07c8, - .size = 0x0000, - .canary_count = 0, - }, [IPA_MEM_MODEM_PROC_CTX] = { .offset = 0x07d0, .size = 0x0200, @@ -320,7 +348,8 @@ static const struct ipa_mem ipa_mem_local_data[] = { }, }; -static struct ipa_mem_data ipa_mem_data = { +/* Memory configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_mem_data ipa_mem_data = { .local_count = ARRAY_SIZE(ipa_mem_local_data), .local = ipa_mem_local_data, .imem_addr = 0x146bd000, @@ -330,7 +359,7 @@ static struct ipa_mem_data ipa_mem_data = { }; /* Interconnect bandwidths are in 1000 byte/second units */ -static struct ipa_interconnect_data ipa_interconnect_data[] = { +static const struct ipa_interconnect_data ipa_interconnect_data[] = { { .name = "memory", .peak_bandwidth = 600000, /* 600 MBps */ @@ -349,15 +378,23 @@ static struct ipa_interconnect_data ipa_interconnect_data[] = { }, }; -static struct ipa_clock_data ipa_clock_data = { +/* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */ +static const struct ipa_clock_data ipa_clock_data = { .core_clock_rate = 75 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, }; -/* Configuration data for the SDM845 SoC. */ -const struct ipa_data ipa_data_sdm845 = { +/* Configuration data for an SoC having IPA v3.5.1 */ +const struct ipa_data ipa_data_v3_5_1 = { .version = IPA_VERSION_3_5_1, + .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK | + BCR_TX_NOT_USING_BRESP_FMASK | + BCR_SUSPEND_L2_IRQ_FMASK | + BCR_HOLB_DROP_L2_IRQ_FMASK | + BCR_DUAL_TX_FMASK, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c new file mode 100644 index 000000000000..05806ceae8b5 --- /dev/null +++ b/drivers/net/ipa/ipa_data-v4.11.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. 
*/ + +#include <linux/log2.h> + +#include "gsi.h" +#include "ipa_data.h" +#include "ipa_endpoint.h" +#include "ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.11 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_UNUSED_2, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_UNUSED_1, + IPA_RSRC_GROUP_DST_DRB_IP, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.11 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 12, + .max_reads = 13, + .max_reads_beats = 120, + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.11 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 5, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 14, + .endpoint_id = 9, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .pad_align = ilog2(sizeof(u32)), + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 2, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 16, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .qmap = true, + .aggregation = true, + .rx = { + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 14, + .toward_ipa = false, + }, + [IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + 
}, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.11 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 6, .max = 6, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 18, .max = 18, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 2, .max = 2, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 15, .max = 15, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.11 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 3, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_DRB_IP] = { + .min = 25, .max = 25, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.11 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.11 */ +static const struct ipa_mem ipa_mem_local_data[] = { + [IPA_MEM_UC_SHARED] = { + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + [IPA_MEM_UC_INFO] = { + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_V4_FILTER_HASHED] = { + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_FILTER] = { + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER_HASHED] = { + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER] = { + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE_HASHED] = { + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE] = { + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE_HASHED] = { + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE] = { + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_MODEM_HEADER] = { + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + [IPA_MEM_AP_HEADER] = { + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_MODEM_PROC_CTX] = { + .offset = 0x0ad0, + .size = 0x0200, + .canary_count = 2, + }, + [IPA_MEM_AP_PROC_CTX] = { + .offset = 0x0cd0, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_NAT_TABLE] = { + .offset = 0x0ee0, + .size = 0x0d00, + .canary_count = 4, + }, + [IPA_MEM_PDN_CONFIG] = { + .offset = 0x1be8, + .size = 0x0050, + .canary_count = 0, + }, + [IPA_MEM_STATS_QUOTA_MODEM] = { + .offset = 0x1c40, + .size = 0x0030, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_AP] = { + .offset = 0x1c70, + .size = 0x0048, + .canary_count = 0, + }, + [IPA_MEM_STATS_TETHERING] = { + .offset = 0x1cb8, + .size = 0x0238, + .canary_count = 0, + }, + [IPA_MEM_STATS_DROP] = { + .offset = 0x1ef0, + .size = 0x0020, + .canary_count = 0, 
+ }, + [IPA_MEM_MODEM] = { + .offset = 0x1f18, + .size = 0x100c, + .canary_count = 2, + }, + [IPA_MEM_UC_EVENT_RING] = { + .offset = 0x3000, + .size = 0x0000, + .canary_count = 1, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.11 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146a8000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 465000, /* 465 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + /* Average rate is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 68570, /* 68.57 MBps */ + .average_bandwidth = 80000, /* 80 MBps (unused?) */ + }, + { + .name = "config", + .peak_bandwidth = 30000, /* 30 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.11 */ +static const struct ipa_clock_data ipa_clock_data = { + .core_clock_rate = 60 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.11 */ +const struct ipa_data ipa_data_v4_11 = { + .version = IPA_VERSION_4_11, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .clock_data = &ipa_clock_data, +}; diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-v4.2.c index 997b51ceb7d7..8744f19c6401 100644 --- a/drivers/net/ipa/ipa_data-sc7180.c +++ b/drivers/net/ipa/ipa_data-v4.2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2019-2020 Linaro Ltd. */ +/* Copyright (C) 2019-2021 Linaro Ltd. */ #include <linux/log2.h> @@ -9,7 +9,41 @@ #include "ipa_endpoint.h" #include "ipa_mem.h" -/* Endpoint configuration for the SC7180 SoC. 
*/ +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.2 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.2 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 12, + /* no outstanding read byte (beat) limit */ + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.2 */ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { [IPA_ENDPOINT_AP_COMMAND_TX] = { .ee_id = GSI_EE_AP, @@ -22,11 +56,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 20, }, .endpoint = { - .seq_type = IPA_SEQ_DMA_ONLY, .config = { - .resource_group = 0, + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, .dma_mode = true, .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, }, }, }, @@ -41,9 +77,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 6, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { - .resource_group = 0, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, .aggregation = true, .status_enable = true, .rx = { @@ -64,14 +99,14 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .filter_support = true, - .seq_type = - IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, .config = { - .resource_group = 0, + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, .checksum = true, .qmap = true, .status_enable = true, .tx = { + .seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC, + .seq_rep_type = IPA_SEQ_REP_DMA_PARSER, .status_endpoint = IPA_ENDPOINT_MODEM_AP_RX, }, @@ -89,9 +124,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 6, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { - .resource_group = 0, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, .checksum = true, .qmap = true, .aggregation = true, @@ -130,73 +164,60 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, }; -/* For the SC7180, resource groups are allocated this way: - * group 0: UL_DL - */ -static const struct ipa_resource_src ipa_resource_src[] = { - { - .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS, - .limits[0] = { - .min = 3, - .max = 63, +/* Source resource configuration data for an SoC having IPA v4.2 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 3, .max = 63, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, - .limits[0] = { - .min = 3, - .max = 3, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 3, .max = 3, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, - .limits[0] = { - .min = 10, 
- .max = 10, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 10, .max = 10, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS, - .limits[0] = { - .min = 1, - .max = 1, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 1, }, }, - { - .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, - .limits[0] = { - .min = 5, - .max = 5, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 5, .max = 5, }, }, }; -static const struct ipa_resource_dst ipa_resource_dst[] = { - { - .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS, - .limits[0] = { - .min = 3, - .max = 3, +/* Destination resource configuration data for an SoC having IPA v4.2 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 3, .max = 3, }, }, - { - .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS, - .limits[0] = { - .min = 1, - .max = 63, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 1, .max = 63, }, }, }; -/* Resource configuration for the SC7180 SoC. */ +/* Resource configuration data for an SoC having IPA v4.2 */ static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, .resource_src_count = ARRAY_SIZE(ipa_resource_src), .resource_src = ipa_resource_src, .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), .resource_dst = ipa_resource_dst, }; -/* IPA-resident memory region configuration for the SC7180 SoC. */ +/* IPA-resident memory region data for an SoC having IPA v4.2 */ static const struct ipa_mem ipa_mem_local_data[] = { [IPA_MEM_UC_SHARED] = { .offset = 0x0000, @@ -206,7 +227,7 @@ static const struct ipa_mem ipa_mem_local_data[] = { [IPA_MEM_UC_INFO] = { .offset = 0x0080, .size = 0x0200, - .canary_count = 2, + .canary_count = 0, }, [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288, @@ -253,11 +274,6 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0140, .canary_count = 2, }, - [IPA_MEM_AP_HEADER] = { - .offset = 0x05e8, - .size = 0x0000, - .canary_count = 0, - }, [IPA_MEM_MODEM_PROC_CTX] = { .offset = 0x05f0, .size = 0x0200, @@ -273,7 +289,7 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0050, .canary_count = 2, }, - [IPA_MEM_STATS_QUOTA] = { + [IPA_MEM_STATS_QUOTA_MODEM] = { .offset = 0x0a50, .size = 0x0060, .canary_count = 2, @@ -283,11 +299,6 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0140, .canary_count = 0, }, - [IPA_MEM_STATS_DROP] = { - .offset = 0x0bf0, - .size = 0, - .canary_count = 0, - }, [IPA_MEM_MODEM] = { .offset = 0x0bf0, .size = 0x140c, @@ -300,7 +311,8 @@ static const struct ipa_mem ipa_mem_local_data[] = { }, }; -static struct ipa_mem_data ipa_mem_data = { +/* Memory configuration data for an SoC having IPA v4.2 */ +static const struct ipa_mem_data ipa_mem_data = { .local_count = ARRAY_SIZE(ipa_mem_local_data), .local = ipa_mem_local_data, .imem_addr = 0x146a8000, @@ -309,8 +321,8 @@ static struct ipa_mem_data ipa_mem_data = { .smem_size = 0x00002000, }; -/* Interconnect bandwidths are in 1000 byte/second units */ -static struct ipa_interconnect_data ipa_interconnect_data[] = { +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { { .name = "memory", .peak_bandwidth = 465000, /* 465 MBps */ @@ -329,15 +341,19 @@ static struct 
ipa_interconnect_data ipa_interconnect_data[] = { }, }; -static struct ipa_clock_data ipa_clock_data = { +/* Clock and interconnect configuration data for an SoC having IPA v4.2 */ +static const struct ipa_clock_data ipa_clock_data = { .core_clock_rate = 100 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, }; -/* Configuration data for the SC7180 SoC. */ -const struct ipa_data ipa_data_sc7180 = { +/* Configuration data for an SoC having IPA v4.2 */ +const struct ipa_data ipa_data_v4_2 = { .version = IPA_VERSION_4_2, + /* backward_compat value is 0 */ + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c new file mode 100644 index 000000000000..5f67a3a909ee --- /dev/null +++ b/drivers/net/ipa/ipa_data-v4.5.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. */ + +#include <linux/log2.h> + +#include "gsi.h" +#include "ipa_data.h" +#include "ipa_endpoint.h" +#include "ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.5 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UNUSED_0 = 0, + IPA_RSRC_GROUP_SRC_UL_DL, + IPA_RSRC_GROUP_SRC_UNUSED_2, + IPA_RSRC_GROUP_SRC_UNUSED_3, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UNUSED_0 = 0, + IPA_RSRC_GROUP_DST_UL_DL_DPL, + IPA_RSRC_GROUP_DST_UNUSED_2, + IPA_RSRC_GROUP_DST_UNUSED_3, + IPA_RSRC_GROUP_DST_UC, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.5 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 0, /* no limit (hardware max) */ + .max_reads_beats = 120, + }, + [IPA_QSB_MASTER_PCIE] = { + .max_writes = 8, + .max_reads = 12, + /* no outstanding read byte (beat) limit */ + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.5 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 9, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 10, + .endpoint_id = 16, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable 
= true, + .rx = { + .pad_align = ilog2(sizeof(u32)), + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 1, + .endpoint_id = 14, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .qmap = true, + .aggregation = true, + .rx = { + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 21, + .toward_ipa = false, + }, + [IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.5 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 11, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 14, .max = 14, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 18, .max = 18, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UNUSED_0] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UNUSED_2] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UNUSED_3] = { + .min = 0, .max = 63, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 63, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 24, .max = 24, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.5 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 16, .max = 16, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 2, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = { + .min = 2, .max = 2, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 63, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 0, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.5 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_src_count = 
IPA_RSRC_GROUP_SRC_COUNT, + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.5 */ +static const struct ipa_mem ipa_mem_local_data[] = { + [IPA_MEM_UC_SHARED] = { + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + [IPA_MEM_UC_INFO] = { + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_V4_FILTER_HASHED] = { + .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_FILTER] = { + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER_HASHED] = { + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER] = { + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE_HASHED] = { + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE] = { + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE_HASHED] = { + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE] = { + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_MODEM_HEADER] = { + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + [IPA_MEM_AP_HEADER] = { + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_MODEM_PROC_CTX] = { + .offset = 0x0ad0, + .size = 0x0b20, + .canary_count = 2, + }, + [IPA_MEM_AP_PROC_CTX] = { + .offset = 0x15f0, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_NAT_TABLE] = { + .offset = 0x1800, + .size = 0x0d00, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_MODEM] = { + .offset = 0x2510, + .size = 0x0030, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_AP] = { + .offset = 0x2540, + .size = 0x0048, + .canary_count = 0, + }, + [IPA_MEM_STATS_TETHERING] = { + .offset = 0x2588, + .size = 0x0238, + .canary_count = 0, + }, + [IPA_MEM_STATS_FILTER_ROUTE] = { + .offset = 0x27c0, + .size = 0x0800, + .canary_count = 0, + }, + [IPA_MEM_STATS_DROP] = { + .offset = 0x2fc0, + .size = 0x0020, + .canary_count = 0, + }, + [IPA_MEM_MODEM] = { + .offset = 0x2fe8, + .size = 0x0800, + .canary_count = 2, + }, + [IPA_MEM_UC_EVENT_RING] = { + .offset = 0x3800, + .size = 0x1000, + .canary_count = 1, + }, + [IPA_MEM_PDN_CONFIG] = { + .offset = 0x4800, + .size = 0x0050, + .canary_count = 0, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.5 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x14688000, + .imem_size = 0x00003000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory-a", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + { + .name = "memory-b", + .peak_bandwidth = 1804000, /* 1.804 GBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + /* Average rate is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 450000, /* 450 MBps */ + .average_bandwidth = 75000, /* 75 MBps (unused?) 
*/ + }, + { + .name = "config", + .peak_bandwidth = 171400, /* 171.4 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.5 */ +static const struct ipa_clock_data ipa_clock_data = { + .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.5 */ +const struct ipa_data ipa_data_v4_5 = { + .version = IPA_VERSION_4_5, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .clock_data = &ipa_clock_data, +}; diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c new file mode 100644 index 000000000000..e41be790f45e --- /dev/null +++ b/drivers/net/ipa/ipa_data-v4.9.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. */ + +#include <linux/log2.h> + +#include "gsi.h" +#include "ipa_data.h" +#include "ipa_endpoint.h" +#include "ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.9 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_DMA, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_DMA, + IPA_RSRC_GROUP_DST_UC, + IPA_RSRC_GROUP_DST_DRB_IP, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.9 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 0, /* no limit (hardware max) */ + .max_reads_beats = 120, + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.9 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 6, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 11, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .pad_align = ilog2(sizeof(u32)), + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 2, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + 
.event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 12, + .endpoint_id = 20, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .qmap = true, + .aggregation = true, + .rx = { + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 16, + .toward_ipa = false, + }, + [IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 12, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 12, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 20, .max = 20, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 2, .max = 2, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 38, .max = 38, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 4, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 4, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 30, .max = 30, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 8, .max = 8, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 9, .max = 9, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_DRB_IP] = { + .min = 39, .max = 39, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 0, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = 
ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.9 */ +static const struct ipa_mem ipa_mem_local_data[] = { + [IPA_MEM_UC_SHARED] = { + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + [IPA_MEM_UC_INFO] = { + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_FILTER] = { + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER_HASHED] = { + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER] = { + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE_HASHED] = { + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE] = { + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE_HASHED] = { + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE] = { + .offset = 0x0608, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_MODEM_HEADER] = { + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + [IPA_MEM_AP_HEADER] = { + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_MODEM_PROC_CTX] = { + .offset = 0x0ad0, + .size = 0x0b20, + .canary_count = 2, + }, + [IPA_MEM_AP_PROC_CTX] = { + .offset = 0x15f0, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_NAT_TABLE] = { + .offset = 0x1800, + .size = 0x0d00, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_MODEM] = { + .offset = 0x2510, + .size = 0x0030, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_AP] = { + .offset = 0x2540, + .size = 0x0048, + .canary_count = 0, + }, + [IPA_MEM_STATS_TETHERING] = { + .offset = 0x2588, + .size = 0x0238, + .canary_count = 0, + }, + [IPA_MEM_STATS_FILTER_ROUTE] = { + .offset = 0x27c0, + .size = 0x0800, + .canary_count = 0, + }, + [IPA_MEM_STATS_DROP] = { + .offset = 0x2fc0, + .size = 0x0020, + .canary_count = 0, + }, + [IPA_MEM_MODEM] = { + .offset = 0x2fe8, + .size = 0x0800, + .canary_count = 2, + }, + [IPA_MEM_UC_EVENT_RING] = { + .offset = 0x3800, + .size = 0x1000, + .canary_count = 1, + }, + [IPA_MEM_PDN_CONFIG] = { + .offset = 0x4800, + .size = 0x0050, + .canary_count = 0, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.9 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146bd000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "ipa_to_llcc", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + { + .name = "llcc_to_ebi1", + .peak_bandwidth = 1804000, /* 1.804 GBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + /* Average rate is unused for the next interconnect */ + { + .name = "appss_to_ipa", + .peak_bandwidth = 74000, /* 74 MBps */ + .average_bandwidth = 0, /* unused */ + }, + +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.9 */ +static const struct ipa_clock_data ipa_clock_data = { + .core_clock_rate = 60 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having 
IPA v4.9. */ +const struct ipa_data ipa_data_v4_9 = { + .version = IPA_VERSION_4_9, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .clock_data = &ipa_clock_data, +}; diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index b476fc373f7f..5c4c8d72d7d8 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2021 Linaro Ltd. */ #ifndef _IPA_DATA_H_ #define _IPA_DATA_H_ @@ -18,8 +18,9 @@ * Boot-time configuration data is used to define the configuration of the * IPA and GSI resources to use for a given platform. This data is supplied * via the Device Tree match table, associated with a particular compatible - * string. The data defines information about resources, endpoints, and - * channels. + * string. The data defines information about how resources, endpoints and + * channels, memory, clocking and so on are allocated and used for the + * platform. * * Resources are data structures used internally by the IPA hardware. The * configuration data defines the number (or limits of the number) of various @@ -45,9 +46,26 @@ * the IPA endpoint. */ -/* The maximum value returned by ipa_resource_group_{src,dst}_count() */ -#define IPA_RESOURCE_GROUP_SRC_MAX 5 -#define IPA_RESOURCE_GROUP_DST_MAX 5 +/* The maximum possible number of source or destination resource groups */ +#define IPA_RESOURCE_GROUP_MAX 8 + +/** enum ipa_qsb_master_id - array index for IPA QSB configuration data */ +enum ipa_qsb_master_id { + IPA_QSB_MASTER_DDR, + IPA_QSB_MASTER_PCIE, +}; + +/** + * struct ipa_qsb_data - Qualcomm System Bus configuration data + * @max_writes: Maximum outstanding write requests for this master + * @max_reads: Maximum outstanding read requests for this master + * @max_reads_beats: Max outstanding read bytes in 8-byte "beats" (if non-zero) + */ +struct ipa_qsb_data { + u8 max_writes; + u8 max_reads; + u8 max_reads_beats; /* Not present for IPA v3.5.1 */ +}; /** * struct gsi_channel_data - GSI channel configuration data @@ -57,10 +75,10 @@ * * A GSI channel is a unidirectional means of transferring data to or * from (and through) the IPA. A GSI channel has a ring buffer made - * up of "transfer elements" (TREs) that specify individual data transfers - * or IPA immediate commands. TREs are filled by the AP, and control - * is passed to IPA hardware by writing the last written element - * into a doorbell register. + * up of "transfer ring elements" (TREs) that specify individual data + * transfers or IPA immediate commands. TREs are filled by the AP, + * and control is passed to IPA hardware by writing the last written + * element into a doorbell register. * * When data transfer commands have completed the GSI generates an * event (a structure of data) and optionally signals the AP with @@ -72,19 +90,23 @@ * that can be included in a single transaction. 
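The ipa_data.h comment above notes that this boot-time configuration data is selected through the Device Tree match table by compatible string, and ipa_data_v4_9 defined just above is one such per-version object. A rough sketch of how such a table ties a compatible string to this data; the compatible strings shown are assumed examples for illustration, and the real table lives elsewhere in the driver (typically ipa_main.c), not in this patch.

```c
#include <linux/mod_devicetable.h>

/* Sketch only: associating assumed DT compatible strings with the
 * per-version configuration data objects exported by these files.
 */
static const struct of_device_id ipa_match[] = {
	{
		.compatible	= "qcom,sdm845-ipa",	/* assumed example */
		.data		= &ipa_data_v3_5_1,
	},
	{
		.compatible	= "qcom,sm8350-ipa",	/* assumed example */
		.data		= &ipa_data_v4_9,
	},
	{ },
};
```

At probe time the driver would then pick up the matching ipa_data pointer, for example via of_device_get_match_data().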
*/ struct gsi_channel_data { - u16 tre_count; - u16 event_count; + u16 tre_count; /* must be a power of 2 */ + u16 event_count; /* must be a power of 2 */ u8 tlv_count; }; /** * struct ipa_endpoint_tx_data - configuration data for TX endpoints + * @seq_type: primary packet processing sequencer type + * @seq_rep_type: sequencer type for replication processing * @status_endpoint: endpoint to which status elements are sent * * The @status_endpoint is only valid if the endpoint's @status_enable * flag is set. */ struct ipa_endpoint_tx_data { + enum ipa_seq_type seq_type; + enum ipa_seq_rep_type seq_rep_type; enum ipa_endpoint_name status_endpoint; }; @@ -136,7 +158,6 @@ struct ipa_endpoint_config_data { /** * struct ipa_endpoint_data - IPA endpoint configuration data * @filter_support: whether endpoint supports filtering - * @seq_type: hardware sequencer type used for endpoint * @config: hardware configuration (see above) * * Not all endpoints support the IPA filtering capability. A filter table @@ -146,25 +167,21 @@ struct ipa_endpoint_config_data { * in the system, and indicate whether they support filtering. * * The remaining endpoint configuration data applies only to AP endpoints. - * The IPA hardware is implemented by sequencers, and the AP must program - * the type(s) of these sequencers at initialization time. The remaining - * endpoint configuration data is defined above. */ struct ipa_endpoint_data { bool filter_support; - /* The next two are specified only for AP endpoints */ - enum ipa_seq_type seq_type; + /* Everything else is specified only for AP endpoints */ struct ipa_endpoint_config_data config; }; /** * struct ipa_gsi_endpoint_data - GSI channel/IPA endpoint data - * ee: GSI execution environment ID - * channel_id: GSI channel ID - * endpoint_id: IPA endpoint ID - * toward_ipa: direction of data transfer - * gsi: GSI channel configuration data (see above) - * ipa: IPA endpoint configuration data (see above) + * @ee_id: GSI execution environment ID + * @channel_id: GSI channel ID + * @endpoint_id: IPA endpoint ID + * @toward_ipa: direction of data transfer + * @channel: GSI channel configuration data (see above) + * @endpoint: IPA endpoint configuration data (see above) */ struct ipa_gsi_endpoint_data { u8 ee_id; /* enum gsi_ee_id */ @@ -176,21 +193,6 @@ struct ipa_gsi_endpoint_data { struct ipa_endpoint_data endpoint; }; -/** enum ipa_resource_type_src - source resource types */ -enum ipa_resource_type_src { - IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS, - IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, - IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, - IPA_RESOURCE_TYPE_SRC_HPS_DMARS, - IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, -}; - -/** enum ipa_resource_type_dst - destination resource types */ -enum ipa_resource_type_dst { - IPA_RESOURCE_TYPE_DST_DATA_SECTORS, - IPA_RESOURCE_TYPE_DST_DPS_DMARS, -}; - /** * struct ipa_resource_limits - minimum and maximum resource counts * @min: minimum number of resources of a given type @@ -202,27 +204,17 @@ struct ipa_resource_limits { }; /** - * struct ipa_resource_src - source endpoint group resource usage - * @type: source group resource type - * @limits: array of limits to use for each resource group - */ -struct ipa_resource_src { - enum ipa_resource_type_src type; - struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_SRC_MAX]; -}; - -/** - * struct ipa_resource_dst - destination endpoint group resource usage - * @type: destination group resource type - * @limits: array of limits to use for each resource group + * struct ipa_resource - resource group source or 
destination resource usage + * @limits: array of resource limits, indexed by group */ -struct ipa_resource_dst { - enum ipa_resource_type_dst type; - struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_DST_MAX]; +struct ipa_resource { + struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_MAX]; }; /** * struct ipa_resource_data - IPA resource configuration data + * @rsrc_group_src_count: number of source resource groups supported + * @rsrc_group_dst_count: number of destination resource groups supported * @resource_src_count: number of entries in the resource_src array * @resource_src: source endpoint group resources * @resource_dst_count: number of entries in the resource_dst array @@ -234,10 +226,12 @@ struct ipa_resource_dst { * programming it at initialization time, so we specify it here. */ struct ipa_resource_data { + u32 rsrc_group_src_count; + u32 rsrc_group_dst_count; u32 resource_src_count; - const struct ipa_resource_src *resource_src; + const struct ipa_resource *resource_src; u32 resource_dst_count; - const struct ipa_resource_dst *resource_dst; + const struct ipa_resource *resource_dst; }; /** @@ -247,7 +241,7 @@ struct ipa_resource_data { * @imem_addr: physical address of IPA region within IMEM * @imem_size: size in bytes of IPA IMEM region * @smem_id: item identifier for IPA region within SMEM memory - * @imem_size: size in bytes of the IPA SMEM region + * @smem_size: size in bytes of the IPA SMEM region */ struct ipa_mem_data { u32 local_count; @@ -285,22 +279,31 @@ struct ipa_clock_data { /** * struct ipa_data - combined IPA/GSI configuration data * @version: IPA hardware version - * @endpoint_count: number of entries in endpoint_data array + * @backward_compat: BCR register value (prior to IPA v4.5 only) + * @qsb_count: number of entries in the qsb_data array + * @qsb_data: Qualcomm System Bus configuration data + * @endpoint_count: number of entries in the endpoint_data array * @endpoint_data: IPA endpoint/GSI channel data * @resource_data: IPA resource configuration data - * @mem_count: number of entries in mem_data array - * @mem_data: IPA-local shared memory region data + * @mem_data: IPA memory region data + * @clock_data: IPA clock and interconnect data */ struct ipa_data { enum ipa_version version; - u32 endpoint_count; /* # entries in endpoint_data[] */ + u32 backward_compat; + u32 qsb_count; /* number of entries in qsb_data[] */ + const struct ipa_qsb_data *qsb_data; + u32 endpoint_count; /* number of entries in endpoint_data[] */ const struct ipa_gsi_endpoint_data *endpoint_data; const struct ipa_resource_data *resource_data; const struct ipa_mem_data *mem_data; const struct ipa_clock_data *clock_data; }; -extern const struct ipa_data ipa_data_sdm845; -extern const struct ipa_data ipa_data_sc7180; +extern const struct ipa_data ipa_data_v3_5_1; +extern const struct ipa_data ipa_data_v4_2; +extern const struct ipa_data ipa_data_v4_5; +extern const struct ipa_data ipa_data_v4_9; +extern const struct ipa_data ipa_data_v4_11; #endif /* _IPA_DATA_H_ */ diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 7209ee3c3124..ccc99ad983eb 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2021 Linaro Ltd. 
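With the reworked ipa_resource layout above, a limit is found by indexing the per-type array with the resource-type enum and then the limits array with the group ID, instead of matching a .type field as before. A minimal kernel-style sketch of such a lookup (the helper is hypothetical and shown only to illustrate the indexing, using the counts and arrays defined in struct ipa_resource_data above):

```c
/* Hypothetical lookup: resource type first, then resource group, using the
 * enums each per-SoC data file defines for its own hardware version.
 */
static const struct ipa_resource_limits *
ipa_src_limits(const struct ipa_resource_data *data, u32 type, u32 group)
{
	if (type >= data->resource_src_count)
		return NULL;
	if (group >= data->rsrc_group_src_count)
		return NULL;

	return &data->resource_src[type].limits[group];
}
```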
*/ #include <linux/types.h> @@ -88,6 +88,11 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, if (ipa_gsi_endpoint_data_empty(data)) return true; + /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */ + if (ipa->version >= IPA_VERSION_4_5) + if (data->endpoint.config.checksum) + return false; + if (!data->toward_ipa) { if (data->endpoint.filter_support) { dev_err(dev, "filtering not supported for " @@ -230,6 +235,17 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *data) { + const struct ipa_gsi_endpoint_data *dp = data; + enum ipa_endpoint_name name; + + if (ipa->version < IPA_VERSION_4_5) + return true; + + /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */ + for (name = 0; name < count; name++, dp++) + if (data->endpoint.config.checksum) + return false; + return true; } @@ -266,7 +282,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) * if (endpoint->toward_ipa) * assert(ipa->version != IPA_VERSION_4.2); * else - * assert(ipa->version == IPA_VERSION_3_5_1); + * assert(ipa->version < IPA_VERSION_4_0); */ mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; @@ -347,7 +363,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) { bool suspended; - if (endpoint->ipa->version != IPA_VERSION_3_5_1) + if (endpoint->ipa->version >= IPA_VERSION_4_0) return enable; /* For IPA v4.0+, no change made */ /* assert(!endpoint->toward_ipa); */ @@ -397,7 +413,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) /* We need one command per modem TX endpoint. We can get an upper * bound on that by assuming all initialized endpoints are modem->IPA. * That won't happen, and we could be more precise, but this is fine - * for now. We need to end the transaction with a "tag process." + * for now. End the transaction with commands to clear the pipeline. */ count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); trans = ipa_cmd_trans_alloc(ipa, count); @@ -468,6 +484,20 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } +static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) +{ + u32 offset; + u32 val; + + if (!endpoint->toward_ipa) + return; + + offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); + val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK); + + iowrite32(val, endpoint->ipa->reg_virt + offset); +} + /** * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register * @endpoint: Endpoint pointer @@ -515,7 +545,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) /* Where IPA will write the length */ offset = offsetof(struct rmnet_map_header, pkt_len); /* Upper bits are stored in HDR_EXT with IPA v4.5 */ - if (version == IPA_VERSION_4_5) + if (version >= IPA_VERSION_4_5) offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); val |= HDR_OFST_PKT_SIZE_VALID_FMASK; @@ -562,7 +592,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) /* IPA v4.5 adds some most-significant bits to a few fields, * two of which are defined in the HDR (not HDR_EXT) register. 
*/ - if (ipa->version == IPA_VERSION_4_5) { + if (ipa->version >= IPA_VERSION_4_5) { /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ if (endpoint->data->qmap && !endpoint->toward_ipa) { u32 offset; @@ -776,7 +806,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) if (!microseconds) return 0; /* Nothing to compute if timer period is 0 */ - if (ipa->version == IPA_VERSION_4_5) + if (ipa->version >= IPA_VERSION_4_5) return hol_block_timer_qtime_val(ipa, microseconds); /* Use 64 bit arithmetic to avoid overflow... */ @@ -795,7 +825,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) * The best precision is achieved when the base value is as * large as possible. Find the highest set bit in the tick * count, and extract the number of bits in the base field - * such that that high bit is included. + * such that high bit is included. */ high = fls(ticks); /* 1..32 */ width = HWEIGHT32(BASE_VALUE_FMASK); @@ -884,18 +914,17 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); - u32 seq_type = endpoint->seq_type; u32 val = 0; if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ - /* Sequencer type is made up of four nibbles */ - val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK); - val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK); - /* The second two apply to replicated packets */ - val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK); - val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK); + /* Low-order byte configures primary packet processing */ + val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); + + /* Second byte configures replicated packet processing */ + val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, + SEQ_REP_TYPE_FMASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -1435,7 +1464,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) if (ret) goto out_suspend_again; - /* Finally, reset and reconfigure the channel again (re-enabling the + /* Finally, reset and reconfigure the channel again (re-enabling * the doorbell engine if appropriate). Sleep for 1 millisecond to * complete the channel reset sequence. Finish by suspending the * channel again (if necessary). @@ -1469,8 +1498,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) * is active, we need to handle things specially to recover. * All other cases just need to reset the underlying GSI channel. 
*/ - special = ipa->version == IPA_VERSION_3_5_1 && - !endpoint->toward_ipa && + special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && endpoint->data->aggregation; if (special && ipa_endpoint_aggr_active(endpoint)) ret = ipa_endpoint_reset_rx_aggr(endpoint); @@ -1490,6 +1518,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) else (void)ipa_endpoint_program_suspend(endpoint, false); ipa_endpoint_init_cfg(endpoint); + ipa_endpoint_init_nat(endpoint); ipa_endpoint_init_hdr(endpoint); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_hdr_metadata_mask(endpoint); @@ -1568,8 +1597,10 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) (void)ipa_endpoint_program_suspend(endpoint, true); } - /* IPA v3.5.1 doesn't use channel stop for suspend */ - stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; + /* Starting with IPA v4.0, endpoints are suspended by stopping the + * underlying GSI channel rather than using endpoint suspend mode. + */ + stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0; ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); if (ret) dev_err(dev, "error %d suspending channel %u\n", ret, @@ -1589,8 +1620,10 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) (void)ipa_endpoint_program_suspend(endpoint, false); - /* IPA v3.5.1 doesn't use channel start for resume */ - start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; + /* Starting with IPA v4.0, the underlying GSI channel must be + * restarted for resume. + */ + start_channel = endpoint->ipa->version >= IPA_VERSION_4_0; ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); if (ret) dev_err(dev, "error %d resuming channel %u\n", ret, @@ -1738,7 +1771,7 @@ int ipa_endpoint_config(struct ipa *ipa) /* Make sure it's pointing in the right direction */ endpoint = &ipa->endpoint[endpoint_id]; - if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) { + if ((endpoint_id < rx_base) != endpoint->toward_ipa) { dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id); ret = -EINVAL; @@ -1766,7 +1799,6 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, endpoint->ipa = ipa; endpoint->ee_id = data->ee_id; - endpoint->seq_type = data->endpoint.seq_type; endpoint->channel_id = data->channel_id; endpoint->endpoint_id = data->endpoint_id; endpoint->toward_ipa = data->toward_ipa; @@ -1775,7 +1807,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, ipa->initialized |= BIT(endpoint->endpoint_id); } -void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) +static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) { endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 881ecc27bd6e..0a859d10312d 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -25,15 +25,16 @@ struct ipa_gsi_endpoint_data; #define IPA_MTU ETH_DATA_LEN enum ipa_endpoint_name { - IPA_ENDPOINT_AP_MODEM_TX, - IPA_ENDPOINT_MODEM_LAN_TX, - IPA_ENDPOINT_MODEM_COMMAND_TX, IPA_ENDPOINT_AP_COMMAND_TX, - IPA_ENDPOINT_MODEM_AP_TX, IPA_ENDPOINT_AP_LAN_RX, + IPA_ENDPOINT_AP_MODEM_TX, IPA_ENDPOINT_AP_MODEM_RX, - IPA_ENDPOINT_MODEM_AP_RX, + IPA_ENDPOINT_MODEM_COMMAND_TX, + IPA_ENDPOINT_MODEM_LAN_TX, IPA_ENDPOINT_MODEM_LAN_RX, + IPA_ENDPOINT_MODEM_AP_TX, + IPA_ENDPOINT_MODEM_AP_RX, + IPA_ENDPOINT_MODEM_DL_NLO_TX, IPA_ENDPOINT_COUNT, /* Number of names (not an index) */ 
}; @@ -41,19 +42,30 @@ enum ipa_endpoint_name { /** * struct ipa_endpoint - IPA endpoint information - * @channel_id: EP's GSI channel - * @evt_ring_id: EP's GSI channel event ring + * @ipa: IPA pointer + * @ee_id: Execution environmnent endpoint is associated with + * @channel_id: GSI channel used by the endpoint + * @endpoint_id: IPA endpoint number + * @toward_ipa: Endpoint direction (true = TX, false = RX) + * @data: Endpoint configuration data + * @trans_tre_max: Maximum number of TRE descriptors per transaction + * @evt_ring_id: GSI event ring used by the endpoint + * @netdev: Network device pointer, if endpoint uses one + * @replenish_enabled: Whether receive buffer replenishing is enabled + * @replenish_ready: Number of replenish transactions without doorbell + * @replenish_saved: Replenish requests held while disabled + * @replenish_backlog: Number of buffers needed to fill hardware queue + * @replenish_work: Work item used for repeated replenish failures */ struct ipa_endpoint { struct ipa *ipa; - enum ipa_seq_type seq_type; enum gsi_ee_id ee_id; u32 channel_id; u32 endpoint_id; bool toward_ipa; const struct ipa_endpoint_config_data *data; - u32 trans_tre_max; /* maximum descriptors per transaction */ + u32 trans_tre_max; u32 evt_ring_id; /* Net device this endpoint is associated with, if any */ @@ -75,8 +87,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa); int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb); -void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint); - int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint); void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint); diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index 61dd7605bcb6..c46df0b7c4e5 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -54,12 +54,14 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id) bool uc_irq = ipa_interrupt_uc(interrupt, irq_id); struct ipa *ipa = interrupt->ipa; u32 mask = BIT(irq_id); + u32 offset; /* For microcontroller interrupts, clear the interrupt right away, * "to avoid clearing unhandled interrupts." */ + offset = ipa_reg_irq_clr_offset(ipa->version); if (uc_irq) - iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET); + iowrite32(mask, ipa->reg_virt + offset); if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id]) interrupt->handler[irq_id](interrupt->ipa, irq_id); @@ -69,7 +71,7 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id) * so defer clearing until after the handler has been called. */ if (!uc_irq) - iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET); + iowrite32(mask, ipa->reg_virt + offset); } /* Process all IPA interrupt types that have been signaled */ @@ -77,13 +79,15 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) { struct ipa *ipa = interrupt->ipa; u32 enabled = interrupt->enabled; + u32 offset; u32 mask; /* The status register indicates which conditions are present, * including conditions whose interrupt is not enabled. Handle * only the enabled ones. 
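The ipa_interrupt.c hunks above and below replace fixed IRQ register offsets with per-version lookups, but keep the same dispatch loop: read the status word, mask it with the enabled interrupts, service each set bit from lowest to highest, then re-read the status. Below is a minimal standalone sketch of that loop in plain C; read_status() and handle_irq() are invented stand-ins for the register read and the handler table, not driver functions.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins, not driver code: a fake status source that reports
 * two pending conditions once, and a handler that just prints. */
static uint32_t fake_status = (1U << 0) | (1U << 5);

static uint32_t read_status(void)
{
	uint32_t val = fake_status;

	fake_status = 0;		/* nothing further pending */
	return val;
}

static void handle_irq(unsigned int irq_id)
{
	printf("servicing interrupt %u\n", irq_id);
}

/* Same shape as the ipa_interrupt_process_all() loop above */
static void process_all(uint32_t enabled)
{
	uint32_t mask = read_status();

	while ((mask &= enabled)) {
		do {
			unsigned int irq_id = (unsigned int)__builtin_ctz(mask);

			mask &= ~(1U << irq_id);
			handle_irq(irq_id);
		} while (mask);

		mask = read_status();
	}
}

int main(void)
{
	process_all((1U << 0) | (1U << 5));
	return 0;
}

Re-reading the status after draining a batch mirrors the driver's behavior of catching conditions that were signaled while earlier ones were being handled.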
*/ - mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET); + offset = ipa_reg_irq_stts_offset(ipa->version); + mask = ioread32(ipa->reg_virt + offset); while ((mask &= enabled)) { do { u32 irq_id = __ffs(mask); @@ -92,7 +96,7 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) ipa_interrupt_process(interrupt, irq_id); } while (mask); - mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET); + mask = ioread32(ipa->reg_virt + offset); } } @@ -115,14 +119,17 @@ static irqreturn_t ipa_isr(int irq, void *dev_id) { struct ipa_interrupt *interrupt = dev_id; struct ipa *ipa = interrupt->ipa; + u32 offset; u32 mask; - mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET); + offset = ipa_reg_irq_stts_offset(ipa->version); + mask = ioread32(ipa->reg_virt + offset); if (mask & interrupt->enabled) return IRQ_WAKE_THREAD; /* Nothing in the mask was supposed to cause an interrupt */ - iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET); + offset = ipa_reg_irq_clr_offset(ipa->version); + iowrite32(mask, ipa->reg_virt + offset); dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n", __func__, mask); @@ -136,15 +143,22 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, { struct ipa *ipa = interrupt->ipa; u32 mask = BIT(endpoint_id); + u32 offset; u32 val; /* assert(mask & ipa->available); */ - val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET); + + /* IPA version 3.0 does not support TX_SUSPEND interrupt control */ + if (ipa->version == IPA_VERSION_3_0) + return; + + offset = ipa_reg_irq_suspend_en_offset(ipa->version); + val = ioread32(ipa->reg_virt + offset); if (enable) val |= mask; else val &= ~mask; - iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET); + iowrite32(val, ipa->reg_virt + offset); } /* Enable TX_SUSPEND for an endpoint */ @@ -165,10 +179,18 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id) void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt) { struct ipa *ipa = interrupt->ipa; + u32 offset; u32 val; - val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET); - iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_CLR_OFFSET); + offset = ipa_reg_irq_suspend_info_offset(ipa->version); + val = ioread32(ipa->reg_virt + offset); + + /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */ + if (ipa->version == IPA_VERSION_3_0) + return; + + offset = ipa_reg_irq_suspend_clr_offset(ipa->version); + iowrite32(val, ipa->reg_virt + offset); } /* Simulate arrival of an IPA TX_SUSPEND interrupt */ @@ -182,13 +204,15 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler) { struct ipa *ipa = interrupt->ipa; + u32 offset; /* assert(ipa_irq < IPA_IRQ_COUNT); */ interrupt->handler[ipa_irq] = handler; /* Update the IPA interrupt mask to enable it */ interrupt->enabled |= BIT(ipa_irq); - iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET); + offset = ipa_reg_irq_en_offset(ipa->version); + iowrite32(interrupt->enabled, ipa->reg_virt + offset); } /* Remove the handler for an IPA interrupt type */ @@ -196,11 +220,13 @@ void ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) { struct ipa *ipa = interrupt->ipa; + u32 offset; /* assert(ipa_irq < IPA_IRQ_COUNT); */ /* Update the IPA interrupt mask to disable it */ interrupt->enabled &= ~BIT(ipa_irq); - iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET); + offset = 
ipa_reg_irq_en_offset(ipa->version); + iowrite32(interrupt->enabled, ipa->reg_virt + offset); interrupt->handler[ipa_irq] = NULL; } @@ -211,6 +237,7 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; struct ipa_interrupt *interrupt; unsigned int irq; + u32 offset; int ret; ret = platform_get_irq_byname(ipa->pdev, "ipa"); @@ -228,7 +255,8 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) interrupt->irq = irq; /* Start with all IPA interrupts disabled */ - iowrite32(0, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET); + offset = ipa_reg_irq_en_offset(ipa->version); + iowrite32(0, ipa->reg_virt + offset); ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT, "ipa", interrupt); diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index b5d63a0cd19e..d5c486a6800d 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -24,6 +24,7 @@ typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id); /** * ipa_interrupt_add() - Register a handler for an IPA interrupt type + * @interrupt: IPA interrupt structure * @irq_id: IPA interrupt type * @handler: Handler function for the interrupt * diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 97c1b55405cb..9915603ed10b 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2021 Linaro Ltd. */ #include <linux/types.h> @@ -22,6 +22,7 @@ #include "ipa_clock.h" #include "ipa_data.h" #include "ipa_endpoint.h" +#include "ipa_resource.h" #include "ipa_cmd.h" #include "ipa_reg.h" #include "ipa_mem.h" @@ -66,7 +67,7 @@ */ /* The name of the GSI firmware file relative to /lib/firmware */ -#define IPA_FWS_PATH "ipa_fws.mdt" +#define IPA_FW_PATH_DEFAULT "ipa_fws.mdt" #define IPA_PAS_ID 15 /* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */ @@ -146,13 +147,13 @@ int ipa_setup(struct ipa *ipa) if (ret) goto err_endpoint_teardown; - ret = ipa_mem_setup(ipa); + ret = ipa_mem_setup(ipa); /* No matching teardown required */ if (ret) goto err_command_disable; - ret = ipa_table_setup(ipa); + ret = ipa_table_setup(ipa); /* No matching teardown required */ if (ret) - goto err_mem_teardown; + goto err_command_disable; /* Enable the exception handling endpoint, and tell the hardware * to use it by default. 
@@ -160,7 +161,7 @@ int ipa_setup(struct ipa *ipa) exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; ret = ipa_endpoint_enable_one(exception_endpoint); if (ret) - goto err_table_teardown; + goto err_command_disable; ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id); @@ -178,10 +179,6 @@ int ipa_setup(struct ipa *ipa) err_default_route_clear: ipa_endpoint_default_route_clear(ipa); ipa_endpoint_disable_one(exception_endpoint); -err_table_teardown: - ipa_table_teardown(ipa); -err_mem_teardown: - ipa_mem_teardown(ipa); err_command_disable: ipa_endpoint_disable_one(command_endpoint); err_endpoint_teardown: @@ -210,8 +207,6 @@ static void ipa_teardown(struct ipa *ipa) ipa_endpoint_default_route_clear(ipa); exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; ipa_endpoint_disable_one(exception_endpoint); - ipa_table_teardown(ipa); - ipa_mem_teardown(ipa); command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; ipa_endpoint_disable_one(command_endpoint); ipa_endpoint_teardown(ipa); @@ -222,13 +217,13 @@ static void ipa_teardown(struct ipa *ipa) gsi_teardown(&ipa->gsi); } -/* Configure QMB Core Master Port selection */ +/* Configure bus access behavior for IPA components */ static void ipa_hardware_config_comp(struct ipa *ipa) { u32 val; - /* Nothing to configure for IPA v3.5.1 */ - if (ipa->version == IPA_VERSION_3_5_1) + /* Nothing to configure prior to IPA v4.0 */ + if (ipa->version < IPA_VERSION_4_0) return; val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET); @@ -249,56 +244,59 @@ static void ipa_hardware_config_comp(struct ipa *ipa) iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET); } -/* Configure DDR and PCIe max read/write QSB values */ -static void ipa_hardware_config_qsb(struct ipa *ipa) +/* Configure DDR and (possibly) PCIe max read/write QSB values */ +static void +ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) { - enum ipa_version version = ipa->version; - u32 max0; - u32 max1; + const struct ipa_qsb_data *data0; + const struct ipa_qsb_data *data1; u32 val; - /* QMB_0 represents DDR; QMB_1 represents PCIe */ - val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK); - switch (version) { - case IPA_VERSION_4_2: - max1 = 0; /* PCIe not present */ - break; - case IPA_VERSION_4_5: - max1 = 8; - break; - default: - max1 = 4; - break; - } - val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK); + /* assert(data->qsb_count > 0); */ + /* assert(data->qsb_count < 3); */ + + /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */ + data0 = &data->qsb_data[IPA_QSB_MASTER_DDR]; + if (data->qsb_count > 1) + data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE]; + + /* Max outstanding write accesses for QSB masters */ + val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK); + if (data->qsb_count > 1) + val |= u32_encode_bits(data1->max_writes, + GEN_QMB_1_MAX_WRITES_FMASK); iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET); - max1 = 12; - switch (version) { - case IPA_VERSION_3_5_1: - max0 = 8; - break; - case IPA_VERSION_4_0: - case IPA_VERSION_4_1: - max0 = 12; - break; - case IPA_VERSION_4_2: - max0 = 12; - max1 = 0; /* PCIe not present */ - break; - case IPA_VERSION_4_5: - max0 = 0; /* No limit (hardware maximum) */ - break; - } - val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK); - val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK); - if (version != IPA_VERSION_3_5_1) { - /* GEN_QMB_0_MAX_READS_BEATS is 0 */ - /* GEN_QMB_1_MAX_READS_BEATS is 0 */ + /* Max outstanding read accesses for 
QSB masters */ + val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK); + if (ipa->version >= IPA_VERSION_4_0) + val |= u32_encode_bits(data0->max_reads_beats, + GEN_QMB_0_MAX_READS_BEATS_FMASK); + if (data->qsb_count > 1) { + val |= u32_encode_bits(data1->max_reads, + GEN_QMB_1_MAX_READS_FMASK); + if (ipa->version >= IPA_VERSION_4_0) + val |= u32_encode_bits(data1->max_reads_beats, + GEN_QMB_1_MAX_READS_BEATS_FMASK); } iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET); } +/* The internal inactivity timer clock is used for the aggregation timer */ +#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ + +/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY + * field to represent the given number of microseconds. The value is one + * less than the number of timer ticks in the requested period. 0 is not + * a valid granularity value. + */ +static u32 ipa_aggr_granularity_val(u32 usec) +{ + /* assert(usec != 0); */ + + return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; +} + /* IPA uses unified Qtime starting at IPA v4.5, implementing various * timestamps and timers independent of the IPA core clock rate. The * Qtimer is based on a 56-bit timestamp incremented at each tick of @@ -385,21 +383,22 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa) /** * ipa_hardware_config() - Primitive hardware initialization * @ipa: IPA pointer + * @data: IPA configuration data */ -static void ipa_hardware_config(struct ipa *ipa) +static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data) { enum ipa_version version = ipa->version; u32 granularity; u32 val; - /* IPA v4.5 has no backward compatibility register */ + /* IPA v4.5+ has no backward compatibility register */ if (version < IPA_VERSION_4_5) { - val = ipa_reg_bcr_val(version); + val = data->backward_compat; iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET); } /* Implement some hardware workarounds */ - if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) { + if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) { /* Enable open global clocks (not needed for IPA v4.5) */ val = GLOBAL_FMASK; val |= GLOBAL_2X_CLK_FMASK; @@ -414,7 +413,7 @@ static void ipa_hardware_config(struct ipa *ipa) ipa_hardware_config_comp(ipa); /* Configure system bus limits */ - ipa_hardware_config_qsb(ipa); + ipa_hardware_config_qsb(ipa, data); if (version < IPA_VERSION_4_5) { /* Configure aggregation timer granularity */ @@ -448,151 +447,6 @@ static void ipa_hardware_deconfig(struct ipa *ipa) ipa_hardware_dcd_deconfig(ipa); } -#ifdef IPA_VALIDATION - -static bool ipa_resource_limits_valid(struct ipa *ipa, - const struct ipa_resource_data *data) -{ - u32 group_count; - u32 i; - u32 j; - - /* We program at most 6 source or destination resource group limits */ - BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6); - - group_count = ipa_resource_group_src_count(ipa->version); - if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX) - return false; - - /* Return an error if a non-zero resource limit is specified - * for a resource group not supported by hardware. 
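ipa_aggr_granularity_val() above (moved here from ipa_reg.h) converts an aggregation timer period in microseconds into the AGGR_GRANULARITY field value: one less than the number of 32 KHz timer ticks in the period. A standalone sketch of the same arithmetic follows, with a local round-to-nearest divide standing in for the kernel's DIV_ROUND_CLOSEST(); the 500 us input is just an example value.

#include <stdint.h>
#include <stdio.h>

#define TIMER_FREQUENCY	32000UL		/* 32 KHz aggregation timer clock */
#define USEC_PER_SEC	1000000UL

/* Round-to-nearest division for positive operands, standing in for the
 * kernel's DIV_ROUND_CLOSEST() */
static uint32_t div_round_closest(uint64_t n, uint64_t d)
{
	return (uint32_t)((n + d / 2) / d);
}

/* Same arithmetic as ipa_aggr_granularity_val() above */
static uint32_t aggr_granularity_val(uint32_t usec)
{
	return div_round_closest((uint64_t)usec * TIMER_FREQUENCY,
				 USEC_PER_SEC) - 1;
}

int main(void)
{
	/* 500 us -> 16 ticks of the 32 KHz clock -> field value 15 */
	printf("%u\n", aggr_granularity_val(500));
	return 0;
}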
- */ - for (i = 0; i < data->resource_src_count; i++) { - const struct ipa_resource_src *resource; - - resource = &data->resource_src[i]; - for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++) - if (resource->limits[j].min || resource->limits[j].max) - return false; - } - - group_count = ipa_resource_group_dst_count(ipa->version); - if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX) - return false; - - for (i = 0; i < data->resource_dst_count; i++) { - const struct ipa_resource_dst *resource; - - resource = &data->resource_dst[i]; - for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++) - if (resource->limits[j].min || resource->limits[j].max) - return false; - } - - return true; -} - -#else /* !IPA_VALIDATION */ - -static bool ipa_resource_limits_valid(struct ipa *ipa, - const struct ipa_resource_data *data) -{ - return true; -} - -#endif /* !IPA_VALIDATION */ - -static void -ipa_resource_config_common(struct ipa *ipa, u32 offset, - const struct ipa_resource_limits *xlimits, - const struct ipa_resource_limits *ylimits) -{ - u32 val; - - val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK); - val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK); - if (ylimits) { - val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK); - val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK); - } - - iowrite32(val, ipa->reg_virt + offset); -} - -static void ipa_resource_config_src(struct ipa *ipa, - const struct ipa_resource_src *resource) -{ - u32 group_count = ipa_resource_group_src_count(ipa->version); - const struct ipa_resource_limits *ylimits; - u32 offset; - - offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); - ylimits = group_count == 1 ? NULL : &resource->limits[1]; - ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); - - if (group_count < 2) - return; - - offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); - ylimits = group_count == 3 ? NULL : &resource->limits[3]; - ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); - - if (group_count < 4) - return; - - offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type); - ylimits = group_count == 5 ? NULL : &resource->limits[5]; - ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); -} - -static void ipa_resource_config_dst(struct ipa *ipa, - const struct ipa_resource_dst *resource) -{ - u32 group_count = ipa_resource_group_dst_count(ipa->version); - const struct ipa_resource_limits *ylimits; - u32 offset; - - offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); - ylimits = group_count == 1 ? NULL : &resource->limits[1]; - ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); - - if (group_count < 2) - return; - - offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); - ylimits = group_count == 3 ? NULL : &resource->limits[3]; - ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); - - if (group_count < 4) - return; - - offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type); - ylimits = group_count == 5 ? 
NULL : &resource->limits[5]; - ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); -} - -static int -ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data) -{ - u32 i; - - if (!ipa_resource_limits_valid(ipa, data)) - return -EINVAL; - - for (i = 0; i < data->resource_src_count; i++) - ipa_resource_config_src(ipa, &data->resource_src[i]); - - for (i = 0; i < data->resource_dst_count; i++) - ipa_resource_config_dst(ipa, &data->resource_dst[i]); - - return 0; -} - -static void ipa_resource_deconfig(struct ipa *ipa) -{ - /* Nothing to do */ -} - /** * ipa_config() - Configure IPA hardware * @ipa: IPA pointer @@ -610,7 +464,7 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) */ ipa_clock_get(ipa); - ipa_hardware_config(ipa); + ipa_hardware_config(ipa, data); ret = ipa_endpoint_config(ipa); if (ret) @@ -620,23 +474,20 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) if (ret) goto err_endpoint_deconfig; - ipa_table_config(ipa); + ipa_table_config(ipa); /* No deconfig required */ - /* Assign resource limitation to each group */ + /* Assign resource limitation to each group; no deconfig required */ ret = ipa_resource_config(ipa, data->resource_data); if (ret) - goto err_table_deconfig; + goto err_mem_deconfig; ret = ipa_modem_config(ipa); if (ret) - goto err_resource_deconfig; + goto err_mem_deconfig; return 0; -err_resource_deconfig: - ipa_resource_deconfig(ipa); -err_table_deconfig: - ipa_table_deconfig(ipa); +err_mem_deconfig: ipa_mem_deconfig(ipa); err_endpoint_deconfig: ipa_endpoint_deconfig(ipa); @@ -654,8 +505,6 @@ err_hardware_deconfig: static void ipa_deconfig(struct ipa *ipa) { ipa_modem_deconfig(ipa); - ipa_resource_deconfig(ipa); - ipa_table_deconfig(ipa); ipa_mem_deconfig(ipa); ipa_endpoint_deconfig(ipa); ipa_hardware_deconfig(ipa); @@ -668,6 +517,7 @@ static int ipa_firmware_load(struct device *dev) struct device_node *node; struct resource res; phys_addr_t phys; + const char *path; ssize_t size; void *virt; int ret; @@ -685,9 +535,17 @@ static int ipa_firmware_load(struct device *dev) return ret; } - ret = request_firmware(&fw, IPA_FWS_PATH, dev); + /* Use name from DTB if specified; use default for *any* error */ + ret = of_property_read_string(dev->of_node, "firmware-name", &path); if (ret) { - dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH); + dev_dbg(dev, "error %d getting \"firmware-name\" resource\n", + ret); + path = IPA_FW_PATH_DEFAULT; + } + + ret = request_firmware(&fw, path, dev); + if (ret) { + dev_err(dev, "error %d requesting \"%s\"\n", ret, path); return ret; } @@ -700,13 +558,11 @@ static int ipa_firmware_load(struct device *dev) goto out_release_firmware; } - ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID, - virt, phys, size, NULL); + ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL); if (ret) - dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH); + dev_err(dev, "error %d loading \"%s\"\n", ret, path); else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID))) - dev_err(dev, "error %d authenticating \"%s\"\n", ret, - IPA_FWS_PATH); + dev_err(dev, "error %d authenticating \"%s\"\n", ret, path); memunmap(virt); out_release_firmware: @@ -718,11 +574,23 @@ out_release_firmware: static const struct of_device_id ipa_match[] = { { .compatible = "qcom,sdm845-ipa", - .data = &ipa_data_sdm845, + .data = &ipa_data_v3_5_1, }, { .compatible = "qcom,sc7180-ipa", - .data = &ipa_data_sc7180, + .data = &ipa_data_v4_2, + }, + { + .compatible = 
"qcom,sdx55-ipa", + .data = &ipa_data_v4_5, + }, + { + .compatible = "qcom,sm8350-ipa", + .data = &ipa_data_v4_9, + }, + { + .compatible = "qcom,sc7280-ipa", + .data = &ipa_data_v4_11, }, { }, }; @@ -735,8 +603,14 @@ MODULE_DEVICE_TABLE(of, ipa_match); static void ipa_validate_build(void) { #ifdef IPA_VALIDATE - /* We assume we're working on 64-bit hardware */ - BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT)); + /* At one time we assumed a 64-bit build, allowing some do_div() + * calls to be replaced by simple division or modulo operations. + * We currently only perform divide and modulo operations on u32, + * u16, or size_t objects, and of those only size_t has any chance + * of being a 64-bit value. (It should be guaranteed 32 bits wide + * on a 32-bit build, but there is no harm in verifying that.) + */ + BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4); /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */ BUILD_BUG_ON(GSI_EE_AP != 0); diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index f25029b9ec85..c5c3b1b7e67d 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2021 Linaro Ltd. */ #include <linux/types.h> @@ -53,6 +53,8 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem) * The AP informs the modem where its portions of memory are located * in a QMI exchange that occurs at modem startup. * + * There is no need for a matching ipa_mem_teardown() function. + * * Return: 0 if successful, or a negative error code */ int ipa_mem_setup(struct ipa *ipa) @@ -61,6 +63,7 @@ int ipa_mem_setup(struct ipa *ipa) struct gsi_trans *trans; u32 offset; u16 size; + u32 val; /* Get a transaction to define the header memory region and to zero * the processing context and modem memory regions. @@ -89,17 +92,13 @@ int ipa_mem_setup(struct ipa *ipa) gsi_trans_commit_wait(trans); /* Tell the hardware where the processing context area is located */ - iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset, - ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET); + offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset; + val = proc_cntxt_base_addr_encoded(ipa->version, offset); + iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET); return 0; } -void ipa_mem_teardown(struct ipa *ipa) -{ - /* Nothing to do */ -} - #ifdef IPA_VALIDATE static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id) diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h index f99180f84f0d..a422aec69e5d 100644 --- a/drivers/net/ipa/ipa_mem.h +++ b/drivers/net/ipa/ipa_mem.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2021 Linaro Ltd. */ #ifndef _IPA_MEM_H_ #define _IPA_MEM_H_ @@ -28,6 +28,7 @@ struct ipa_mem_data; * The set of memory regions is defined in configuration data. 
They are * subject to these constraints: * - a zero offset and zero size represents and undefined region + * - a region's size does not include space for its "canary" values * - a region's offset is defined to be *past* all "canary" values * - offset must be large enough to account for all canaries * - a region's size may be zero, but may still have canaries @@ -56,11 +57,18 @@ enum ipa_mem_id { IPA_MEM_AP_HEADER, /* 0 canaries */ IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */ IPA_MEM_AP_PROC_CTX, /* 0 canaries */ - IPA_MEM_PDN_CONFIG, /* 2 canaries (IPA v4.0 and above) */ - IPA_MEM_STATS_QUOTA, /* 2 canaries (IPA v4.0 and above) */ + IPA_MEM_NAT_TABLE, /* 4 canaries (IPA v4.5 and above) */ + IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0 and above) */ + IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0 and above) */ + IPA_MEM_STATS_QUOTA_AP, /* 0 canaries (IPA v4.0 and above) */ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */ + IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5 and above) */ IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */ - IPA_MEM_MODEM, /* 0 canaries */ + IPA_MEM_MODEM, /* 0/2 canaries */ IPA_MEM_UC_EVENT_RING, /* 1 canary */ IPA_MEM_COUNT, /* Number of regions (not an index) */ }; @@ -69,7 +77,7 @@ enum ipa_mem_id { * struct ipa_mem - IPA local memory region description * @offset: offset in IPA memory space to base of the region * @size: size in bytes base of the region - * @canary_count # 32-bit "canary" values that precede region + * @canary_count: Number of 32-bit "canary" values that precede region */ struct ipa_mem { u32 offset; @@ -80,8 +88,7 @@ struct ipa_mem { int ipa_mem_config(struct ipa *ipa); void ipa_mem_deconfig(struct ipa *ipa); -int ipa_mem_setup(struct ipa *ipa); -void ipa_mem_teardown(struct ipa *ipa); +int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */ int ipa_mem_zero_modem(struct ipa *ipa); diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index 9b08eb823984..af9aedbde717 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2021 Linaro Ltd. 
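Among the constraints listed for memory regions in ipa_mem.h above, one is purely arithmetic: a region's offset must leave room for all of the 32-bit "canary" values that precede it, while its size does not include that space. A minimal standalone check of that property, using a local stand-in for struct ipa_mem and hypothetical values:

#include <stdbool.h>
#include <stdint.h>

/* Local stand-in mirroring struct ipa_mem above: offset and size of the
 * region in bytes, plus the number of 32-bit canaries preceding it. */
struct mem_region {
	uint32_t offset;
	uint32_t size;
	uint16_t canary_count;
};

/* Offset must account for all canaries: each canary occupies 4 bytes
 * immediately before the region. */
static bool region_offset_ok(const struct mem_region *mem)
{
	return mem->offset >= mem->canary_count * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
	/* hypothetical region: 2 canaries need at least 8 bytes of offset */
	struct mem_region example = { .offset = 8, .size = 64, .canary_count = 2 };

	return region_offset_ok(&example) ? 0 : 1;
}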
*/ #include <linux/errno.h> @@ -213,18 +213,18 @@ int ipa_modem_start(struct ipa *ipa) goto out_set_state; } - ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; - ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; - SET_NETDEV_DEV(netdev, &ipa->pdev->dev); priv = netdev_priv(netdev); priv->ipa = ipa; ret = register_netdev(netdev); - if (ret) - free_netdev(netdev); - else + if (!ret) { ipa->modem_netdev = netdev; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; + } else { + free_netdev(netdev); + } out_set_state: if (ret) @@ -240,7 +240,6 @@ int ipa_modem_stop(struct ipa *ipa) { struct net_device *netdev = ipa->modem_netdev; enum ipa_modem_state state; - int ret; /* Only attempt to stop the modem if it's running */ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING, @@ -257,27 +256,20 @@ int ipa_modem_stop(struct ipa *ipa) /* Prevent the modem from triggering a call to ipa_setup() */ ipa_smp2p_disable(ipa); + /* Stop the queue and disable the endpoints if it's open */ if (netdev) { - /* Stop the queue and disable the endpoints if it's open */ - ret = ipa_stop(netdev); - if (ret) - goto out_set_state; - + (void)ipa_stop(netdev); + ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL; ipa->modem_netdev = NULL; unregister_netdev(netdev); free_netdev(netdev); - } else { - ret = 0; } -out_set_state: - if (ret) - atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING); - else - atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED); + atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED); smp_mb__after_atomic(); - return ret; + return 0; } /* Treat a "clean" modem stop the same as a crash */ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index e594bf3b600f..593665efbcf9 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) mem = &ipa->mem[IPA_MEM_V4_ROUTE]; req.v4_route_tbl_info_valid = 1; req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_route_tbl_info.count = mem->size / sizeof(__le64); mem = &ipa->mem[IPA_MEM_V6_ROUTE]; req.v6_route_tbl_info_valid = 1; req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_route_tbl_info.count = mem->size / sizeof(__le64); mem = &ipa->mem[IPA_MEM_V4_FILTER]; req.v4_filter_tbl_start_valid = 1; @@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v4_hash_route_tbl_info_valid = 1; req.v4_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64); } mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]; @@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v6_hash_route_tbl_info_valid = 1; req.v6_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64); } mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED]; @@ -379,8 +377,8 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) /* None of the stats fields are valid (IPA v4.0 and above) */ - if (ipa->version != IPA_VERSION_3_5_1) { - mem = &ipa->mem[IPA_MEM_STATS_QUOTA]; + if (ipa->version >= IPA_VERSION_4_0) { 
+ mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM]; if (mem->size) { req.hw_stats_quota_base_addr_valid = 1; req.hw_stats_quota_base_addr = diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h index 3993687593d0..b6f2055d35a6 100644 --- a/drivers/net/ipa/ipa_qmi.h +++ b/drivers/net/ipa/ipa_qmi.h @@ -13,11 +13,15 @@ struct ipa; /** * struct ipa_qmi - QMI state associated with an IPA - * @client_handle - used to send an QMI requests to the modem - * @server_handle - used to handle QMI requests from the modem - * @initialized - whether QMI initialization has completed - * @indication_register_received - tracks modem request receipt - * @init_driver_response_received - tracks modem response receipt + * @client_handle: Used to send an QMI requests to the modem + * @server_handle: Used to handle QMI requests from the modem + * @modem_sq: QMAP socket address for the modem QMI server + * @init_driver_work: Work structure used for INIT_DRIVER message handling + * @initial_boot: True if first boot has not yet completed + * @uc_ready: True once DRIVER_INIT_COMPLETE request received + * @modem_ready: True when INIT_DRIVER response received + * @indication_requested: True when INDICATION_REGISTER request received + * @indication_sent: True when INIT_COMPLETE indication sent */ struct ipa_qmi { struct qmi_handle client_handle; diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 73413371e3d3..6838e8065072 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -56,7 +56,7 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = { .elem_size = sizeof_field(struct ipa_indication_register_req, ipa_mhi_ready_ind_valid), - .tlv_type = 0x11, + .tlv_type = 0x12, .offset = offsetof(struct ipa_indication_register_req, ipa_mhi_ready_ind_valid), }, @@ -66,11 +66,51 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = { .elem_size = sizeof_field(struct ipa_indication_register_req, ipa_mhi_ready_ind), - .tlv_type = 0x11, + .tlv_type = 0x12, .offset = offsetof(struct ipa_indication_register_req, ipa_mhi_ready_ind), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + endpoint_desc_ind_valid), + .tlv_type = 0x13, + .offset = offsetof(struct ipa_indication_register_req, + endpoint_desc_ind_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + endpoint_desc_ind), + .tlv_type = 0x13, + .offset = offsetof(struct ipa_indication_register_req, + endpoint_desc_ind), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + bw_change_ind_valid), + .tlv_type = 0x14, + .offset = offsetof(struct ipa_indication_register_req, + bw_change_ind_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + bw_change_ind), + .tlv_type = 0x14, + .offset = offsetof(struct ipa_indication_register_req, + bw_change_ind), + }, + { .data_type = QMI_EOTI, }, }; @@ -530,7 +570,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { hw_stats_quota_base_addr_valid), }, { - .data_type = QMI_SIGNED_4_BYTE_ENUM, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, @@ -545,17 +585,17 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .elem_size = sizeof_field(struct ipa_init_modem_driver_req, hw_stats_quota_size_valid), - 
.tlv_type = 0x1f, + .tlv_type = 0x20, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_quota_size_valid), }, { - .data_type = QMI_SIGNED_4_BYTE_ENUM, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, hw_stats_quota_size), - .tlv_type = 0x1f, + .tlv_type = 0x20, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_quota_size), }, @@ -564,18 +604,38 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr_valid), + .tlv_type = 0x21, + .offset = offsetof(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr), + .tlv_type = 0x21, + .offset = offsetof(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_init_modem_driver_req, hw_stats_drop_size_valid), - .tlv_type = 0x1f, + .tlv_type = 0x22, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_drop_size_valid), }, { - .data_type = QMI_SIGNED_4_BYTE_ENUM, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, hw_stats_drop_size), - .tlv_type = 0x1f, + .tlv_type = 0x22, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_drop_size), }, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index 12b6621f4b0e..3233d145fd87 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -24,7 +24,7 @@ * information for each field. The qmi_send_*() interfaces require * the message size to be provided. */ -#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 12 /* -> server handle */ +#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 20 /* -> server handle */ #define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */ #define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */ #define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */ @@ -44,6 +44,10 @@ struct ipa_indication_register_req { u8 data_usage_quota_reached; u8 ipa_mhi_ready_ind_valid; u8 ipa_mhi_ready_ind; + u8 endpoint_desc_ind_valid; + u8 endpoint_desc_ind; + u8 bw_change_ind_valid; + u8 bw_change_ind; }; /* The response to a IPA_QMI_INDICATION_REGISTER request consists only of diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 732e691e9aa6..286ea9634c49 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2021 Linaro Ltd. 
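The ipa_qmi_msg.h hunk above grows IPA_QMI_INDICATION_REGISTER_REQ_SZ from 12 to 20 to cover the two new optional indicators. Assuming the usual QMI TLV layout (1-byte type, 2-byte length), each optional single-byte indicator adds 4 bytes on the wire, which accounts for the 8-byte increase; a trivial arithmetic check:

#include <stdio.h>

#define QMI_TLV_TYPE_SIZE	1	/* assumed QMI TLV header layout */
#define QMI_TLV_LEN_SIZE	2
#define U8_TLV_SIZE		(QMI_TLV_TYPE_SIZE + QMI_TLV_LEN_SIZE + 1)

int main(void)
{
	unsigned int old_size = 12;	/* previous INDICATION_REGISTER req size */
	unsigned int new_tlvs = 2;	/* endpoint_desc_ind and bw_change_ind */

	/* 12 + 2 * 4 = 20, matching the updated definition above */
	printf("%u\n", old_size + new_tlvs * U8_TLV_SIZE);
	return 0;
}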
*/ #ifndef _IPA_REG_H_ #define _IPA_REG_H_ @@ -66,14 +66,16 @@ struct ipa; */ #define IPA_REG_COMP_CFG_OFFSET 0x0000003c -/* The next field is not supported for IPA v4.1 */ +/* The next field is not supported for IPA v4.0+, not present for IPA v4.5+ */ #define ENABLE_FMASK GENMASK(0, 0) +/* The next field is present for IPA v4.7+ */ +#define RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS_FMASK GENMASK(0, 0) #define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1) #define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2) #define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3) -/* The next field is not present for IPA v4.5 */ +/* The next field is not present for IPA v4.5+ */ #define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4) -/* The remaining fields are not present for IPA v3.5.1 */ +/* The next twelve fields are present for IPA v4.0+ */ #define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5) #define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6) #define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7) @@ -86,9 +88,41 @@ struct ipa; #define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14) #define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15) #define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16) -#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17) -/* The next field is present for IPA v4.5 */ -#define IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN_FMASK GENMASK(21, 21) +/* The next five fields are present for IPA v4.9+ */ +#define QMB_RAM_RD_CACHE_DISABLE_FMASK GENMASK(19, 19) +#define GENQMB_AOOOWR_FMASK GENMASK(20, 20) +#define IF_OUT_OF_BUF_STOP_RESET_MASK_EN_FMASK GENMASK(21, 21) +#define GEN_QMB_1_DYNAMIC_ASIZE_FMASK GENMASK(30, 30) +#define GEN_QMB_0_DYNAMIC_ASIZE_FMASK GENMASK(31, 31) + +/* Encoded value for COMP_CFG register ATOMIC_FETCHER_ARB_LOCK_DIS field */ +static inline u32 arbitration_lock_disable_encoded(enum ipa_version version, + u32 mask) +{ + /* assert(version >= IPA_VERSION_4_0); */ + + if (version < IPA_VERSION_4_9) + return u32_encode_bits(mask, GENMASK(20, 17)); + + if (version == IPA_VERSION_4_9) + return u32_encode_bits(mask, GENMASK(24, 22)); + + return u32_encode_bits(mask, GENMASK(23, 22)); +} + +/* Encoded value for COMP_CFG register FULL_FLUSH_WAIT_RS_CLOSURE_EN field */ +static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version, + bool enable) +{ + u32 val = enable ? 
1 : 0; + + /* assert(version >= IPA_VERSION_4_5); */ + + if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7) + return u32_encode_bits(val, GENMASK(21, 21)); + + return u32_encode_bits(val, GENMASK(17, 17)); +} #define IPA_REG_CLKON_CFG_OFFSET 0x00000044 #define RX_FMASK GENMASK(0, 0) @@ -108,13 +142,15 @@ struct ipa; #define ACK_MNGR_FMASK GENMASK(14, 14) #define D_DCPH_FMASK GENMASK(15, 15) #define H_DCPH_FMASK GENMASK(16, 16) -/* The next field is not present for IPA v4.5 */ +/* The next field is not present for IPA v4.5+ */ #define DCMP_FMASK GENMASK(17, 17) +/* The next three fields are present for IPA v3.5+ */ #define NTF_TX_CMDQS_FMASK GENMASK(18, 18) #define TX_0_FMASK GENMASK(19, 19) #define TX_1_FMASK GENMASK(20, 20) +/* The next field is present for IPA v3.5.1+ */ #define FNR_FMASK GENMASK(21, 21) -/* The remaining fields are not present for IPA v3.5.1 */ +/* The next eight fields are present for IPA v4.0+ */ #define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22) #define AGGR_WRAPPER_FMASK GENMASK(23, 23) #define RAM_SLAVEWAY_FMASK GENMASK(24, 24) @@ -123,8 +159,10 @@ struct ipa; #define GSI_IF_FMASK GENMASK(27, 27) #define GLOBAL_FMASK GENMASK(28, 28) #define GLOBAL_2X_CLK_FMASK GENMASK(29, 29) -/* The next field is present for IPA v4.5 */ +/* The next field is present for IPA v4.5+ */ #define DPL_FIFO_FMASK GENMASK(30, 30) +/* The next field is present for IPA v4.7+ */ +#define DRBIP_FMASK GENMASK(31, 31) #define IPA_REG_ROUTE_OFFSET 0x00000048 #define ROUTE_DIS_FMASK GENMASK(0, 0) @@ -145,13 +183,13 @@ struct ipa; #define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078 #define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0) #define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4) -/* The next two fields are not present for IPA v3.5.1 */ +/* The next two fields are present for IPA v4.0+ */ #define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16) #define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24) static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version) { - if (version == IPA_VERSION_3_5_1) + if (version < IPA_VERSION_4_0) return 0x000008c; return 0x0000148; @@ -159,7 +197,7 @@ static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version) static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version) { - if (version == IPA_VERSION_3_5_1) + if (version < IPA_VERSION_4_0) return 0x0000090; return 0x000014c; @@ -174,96 +212,79 @@ static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version) /* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */ static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version) { - if (version == IPA_VERSION_3_5_1) + if (version < IPA_VERSION_4_0) return 0x0000010c; return 0x000000b4; } -/* The next register is not present for IPA v4.5 */ +/* The next register is not present for IPA v4.5+ */ #define IPA_REG_BCR_OFFSET 0x000001d0 -/* The next two fields are not present for IPA v4.2 */ +/* The next two fields are not present for IPA v4.2+ */ #define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0) #define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1) -/* The next field is invalid for IPA v4.1 */ +/* The next field is invalid for IPA v4.0+ */ #define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2) -/* The next two fields are not present for IPA v4.2 */ +/* The next two fields are not present for IPA v4.2+ */ #define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3) #define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4) +/* The next five fields are present for IPA v3.5+ */ #define 
BCR_DUAL_TX_FMASK GENMASK(5, 5) #define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6) #define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7) #define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8) #define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9) -/* Backward compatibility register value to use for each version */ -static inline u32 ipa_reg_bcr_val(enum ipa_version version) +/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */ +#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8 + +/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */ +static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version, + u32 addr) { - if (version == IPA_VERSION_3_5_1) - return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK | - BCR_TX_NOT_USING_BRESP_FMASK | - BCR_SUSPEND_L2_IRQ_FMASK | - BCR_HOLB_DROP_L2_IRQ_FMASK | - BCR_DUAL_TX_FMASK; - - if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1) - return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK | - BCR_SUSPEND_L2_IRQ_FMASK | - BCR_HOLB_DROP_L2_IRQ_FMASK | - BCR_DUAL_TX_FMASK; - - /* assert(version != IPA_VERSION_4_5); */ - - return 0x00000000; -} + if (version < IPA_VERSION_4_5) + return u32_encode_bits(addr, GENMASK(16, 0)); -/* The value of the next register must be a multiple of 8 */ -#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8 + return u32_encode_bits(addr, GENMASK(17, 0)); +} /* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */ #define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec -/* The next register is not present for IPA v4.5 */ +/* The next register is not present for IPA v4.5+ */ #define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0 +/* The next field is not present for IPA v3.5+ */ +#define EOT_COAL_GRANULARITY GENMASK(3, 0) #define AGGR_GRANULARITY_FMASK GENMASK(8, 4) -/* The internal inactivity timer clock is used for the aggregation timer */ -#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ - -/* Compute the value to use in the AGGR_GRANULARITY field representing the - * given number of microseconds. The value is one less than the number of - * timer ticks in the requested period. 0 not a valid granularity value. 
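proc_cntxt_base_addr_encoded(), added earlier in this ipa_reg.h hunk, packs the processing context base address into a field that is 17 bits wide before IPA v4.5 and 18 bits wide from v4.5 on; the register value itself must be a multiple of 8. A standalone sketch of the same masking, with a local helper standing in for the kernel's u32_encode_bits() and a plain flag standing in for the version check (the 0x1000 address is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for u32_encode_bits(): shift a value into a contiguous field
 * mask (all masks used here start at bit 0, so the shift is zero). */
static uint32_t encode_field(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

/* Same shape as proc_cntxt_base_addr_encoded(): the base address field
 * spans bits 16:0 before IPA v4.5 and bits 17:0 afterwards.  The address
 * is a multiple of 8, so its low three bits are always zero. */
static uint32_t base_addr_encoded(int at_least_v4_5, uint32_t addr)
{
	uint32_t mask = at_least_v4_5 ? 0x0003ffff : 0x0001ffff;

	return encode_field(addr, mask);
}

int main(void)
{
	/* hypothetical 0x1000 base address, pre-v4.5 field width */
	printf("0x%08x\n", base_addr_encoded(0, 0x1000));
	return 0;
}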
- */ -static inline u32 ipa_aggr_granularity_val(u32 usec) -{ - return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; -} - -/* The next register is not present for IPA v4.5 */ +/* The next register is present for IPA v3.5+ */ #define IPA_REG_TX_CFG_OFFSET 0x000001fc -/* The first three fields are present for IPA v3.5.1 only */ +/* The next three fields are not present for IPA v4.0+ */ #define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0) #define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1) #define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2) -/* The next six fields are present for IPA v4.0 and above */ +/* The next six fields are present for IPA v4.0+ */ #define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2) #define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6) #define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10) #define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11) #define PA_MASK_EN_FMASK GENMASK(12, 12) #define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13) -/* The next field is present for IPA v4.5 */ +/* The next field is present for IPA v4.5+ */ #define DUAL_TX_ENABLE_FMASK GENMASK(17, 17) -/* The next two fields are present for IPA v4.2 only */ +/* The next field is present for IPA v4.2+, but not IPA v4.5 */ #define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18) +/* The next field is present for IPA v4.2 only */ #define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19) +/* The next register is present for IPA v3.5+ */ #define IPA_REG_FLAVOR_0_OFFSET 0x00000210 #define IPA_MAX_PIPES_FMASK GENMASK(3, 0) #define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8) #define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16) #define IPA_PROD_LOWEST_FMASK GENMASK(27, 24) +/* The next register is present for IPA v3.5+ */ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) { if (version >= IPA_VERSION_4_2) @@ -275,19 +296,19 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0) #define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16) -/* The next register is present for IPA v4.5 */ +/* The next register is present for IPA v4.5+ */ #define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c #define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0) #define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7) #define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8) #define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16) -/* The next register is present for IPA v4.5 */ +/* The next register is present for IPA v4.5+ */ #define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250 #define DIV_VALUE_FMASK GENMASK(8, 0) #define DIV_ENABLE_FMASK GENMASK(31, 31) -/* The next register is present for IPA v4.5 */ +/* The next register is present for IPA v4.5+ */ #define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254 #define GRAN_0_FMASK GENMASK(2, 0) #define GRAN_1_FMASK GENMASK(5, 3) @@ -304,63 +325,23 @@ enum ipa_pulse_gran { IPA_GRAN_655350_US = 0x7, }; -/* # IPA source resource groups available based on version */ -static inline u32 ipa_resource_group_src_count(enum ipa_version version) -{ - switch (version) { - case IPA_VERSION_3_5_1: - case IPA_VERSION_4_0: - case IPA_VERSION_4_1: - return 4; - - case IPA_VERSION_4_2: - return 1; - - case IPA_VERSION_4_5: - return 5; - - default: - return 0; - } -} - -/* # IPA destination resource groups available based on version */ -static inline u32 ipa_resource_group_dst_count(enum ipa_version version) -{ - switch (version) { - case IPA_VERSION_3_5_1: - return 3; - - case IPA_VERSION_4_0: - case IPA_VERSION_4_1: - return 
4; - - case IPA_VERSION_4_2: - return 1; - - case IPA_VERSION_4_5: - return 5; - - default: - return 0; - } -} - -/* Not all of the following are valid (depends on the count, above) */ +/* Not all of the following are present (depends on IPA version) */ #define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \ (0x00000400 + 0x0020 * (rt)) #define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \ (0x00000404 + 0x0020 * (rt)) -/* The next register is only present for IPA v4.5 */ #define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \ (0x00000408 + 0x0020 * (rt)) +#define IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \ + (0x0000040c + 0x0020 * (rt)) #define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \ (0x00000500 + 0x0020 * (rt)) #define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \ (0x00000504 + 0x0020 * (rt)) -/* The next register is only present for IPA v4.5 */ #define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \ (0x00000508 + 0x0020 * (rt)) +#define IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \ + (0x0000050c + 0x0020 * (rt)) /* The next four fields are used for all resource group registers */ #define X_MIN_LIM_FMASK GENMASK(5, 0) #define X_MAX_LIM_FMASK GENMASK(13, 8) @@ -370,8 +351,9 @@ static inline u32 ipa_resource_group_dst_count(enum ipa_version version) #define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \ (0x00000800 + 0x0070 * (ep)) -/* The next field should only used for IPA v3.5.1 */ +/* Valid only for RX (IPA producer) endpoints (do not use for IPA v4.0+) */ #define ENDP_SUSPEND_FMASK GENMASK(0, 0) +/* Valid only for TX (IPA consumer) endpoints */ #define ENDP_DELAY_FMASK GENMASK(1, 1) #define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \ @@ -381,11 +363,23 @@ static inline u32 ipa_resource_group_dst_count(enum ipa_version version) #define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3) #define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8) -/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */ +/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */ enum ipa_cs_offload_en { IPA_CS_OFFLOAD_NONE = 0x0, - IPA_CS_OFFLOAD_UL = 0x1, - IPA_CS_OFFLOAD_DL = 0x2, + IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */ + IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */ +}; + +/* Valid only for TX (IPA consumer) endpoints */ +#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \ + (0x0000080c + 0x0070 * (ep)) +#define NAT_EN_FMASK GENMASK(1, 0) + +/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */ +enum ipa_nat_en { + IPA_NAT_BYPASS = 0x0, + IPA_NAT_SRC = 0x1, + IPA_NAT_DST = 0x2, }; #define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \ @@ -396,11 +390,12 @@ enum ipa_cs_offload_en { #define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13) #define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19) #define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20) +/* The next field is not present for IPA v4.9+ */ #define HDR_A5_MUX_FMASK GENMASK(26, 26) #define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27) -/* The next field is not present for IPA v4.5 */ +/* The next field is not present for IPA v4.5+ */ #define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28) -/* The next two fields are present for IPA v4.5 */ +/* The next two fields are present for IPA v4.5+ */ #define HDR_LEN_MSB_FMASK GENMASK(29, 28) #define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30) @@ -452,7 +447,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version, #define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3) #define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4) #define HDR_PAD_TO_ALIGNMENT_FMASK 
GENMASK(13, 10) -/* The next three fields are present for IPA v4.5 */ +/* The next three fields are present for IPA v4.5+ */ #define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16) #define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18) #define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20) @@ -465,16 +460,18 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version, #define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \ (0x00000820 + 0x0070 * (txep)) #define MODE_FMASK GENMASK(2, 0) -/* The next field is present for IPA v4.5 */ +/* The next field is present for IPA v4.5+ */ #define DCPH_ENABLE_FMASK GENMASK(3, 3) #define DEST_PIPE_INDEX_FMASK GENMASK(8, 4) #define BYTE_THRESHOLD_FMASK GENMASK(27, 12) #define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28) #define PAD_EN_FMASK GENMASK(29, 29) -/* The next register is not present for IPA v4.5 */ +/* The next field is not present for IPA v4.5+ */ #define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30) +/* The next field is present for IPA v4.9+ */ +#define DRBIP_ACL_ENABLE GENMASK(30, 30) -/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */ +/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */ enum ipa_mode { IPA_BASIC = 0x0, IPA_ENABLE_FRAMING_HDLC = 0x1, @@ -486,47 +483,54 @@ enum ipa_mode { (0x00000824 + 0x0070 * (ep)) #define AGGR_EN_FMASK GENMASK(1, 0) #define AGGR_TYPE_FMASK GENMASK(4, 2) + +/* The legacy value is used for IPA hardware before IPA v4.5 */ static inline u32 aggr_byte_limit_fmask(bool legacy) { return legacy ? GENMASK(9, 5) : GENMASK(10, 5); } +/* The legacy value is used for IPA hardware before IPA v4.5 */ static inline u32 aggr_time_limit_fmask(bool legacy) { return legacy ? GENMASK(14, 10) : GENMASK(16, 12); } +/* The legacy value is used for IPA hardware before IPA v4.5 */ static inline u32 aggr_pkt_limit_fmask(bool legacy) { return legacy ? GENMASK(20, 15) : GENMASK(22, 17); } +/* The legacy value is used for IPA hardware before IPA v4.5 */ static inline u32 aggr_sw_eof_active_fmask(bool legacy) { return legacy ? GENMASK(21, 21) : GENMASK(23, 23); } +/* The legacy value is used for IPA hardware before IPA v4.5 */ static inline u32 aggr_force_close_fmask(bool legacy) { return legacy ? GENMASK(22, 22) : GENMASK(24, 24); } +/* The legacy value is used for IPA hardware before IPA v4.5 */ static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy) { return legacy ? 
GENMASK(24, 24) : GENMASK(26, 26); } -/* The next field is present for IPA v4.5 */ +/* The next field is present for IPA v4.5+ */ #define AGGR_GRAN_SEL_FMASK GENMASK(27, 27) -/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */ +/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */ enum ipa_aggr_en { - IPA_BYPASS_AGGR = 0x0, - IPA_ENABLE_AGGR = 0x1, - IPA_ENABLE_DEAGGR = 0x2, + IPA_BYPASS_AGGR = 0x0, /* (TX, RX) */ + IPA_ENABLE_AGGR = 0x1, /* (RX) */ + IPA_ENABLE_DEAGGR = 0x2, /* (TX) */ }; -/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */ +/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */ enum ipa_aggr_type { IPA_MBIM_16 = 0x0, IPA_HDLC = 0x1, @@ -567,53 +571,73 @@ enum ipa_aggr_type { /* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */ static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp) { - switch (version) { - case IPA_VERSION_4_2: - return u32_encode_bits(rsrc_grp, GENMASK(0, 0)); - case IPA_VERSION_4_5: + if (version < IPA_VERSION_3_5 || version == IPA_VERSION_4_5) return u32_encode_bits(rsrc_grp, GENMASK(2, 0)); - default: - return u32_encode_bits(rsrc_grp, GENMASK(1, 0)); - } + + if (version == IPA_VERSION_4_2 || version == IPA_VERSION_4_7) + return u32_encode_bits(rsrc_grp, GENMASK(0, 0)); + + return u32_encode_bits(rsrc_grp, GENMASK(1, 0)); } /* Valid only for TX (IPA consumer) endpoints */ #define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \ (0x0000083c + 0x0070 * (txep)) -#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0) -#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4) -#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8) -#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12) +#define SEQ_TYPE_FMASK GENMASK(7, 0) +#define SEQ_REP_TYPE_FMASK GENMASK(15, 8) /** - * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N - * @IPA_SEQ_DMA_ONLY: only DMA is performed - * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP: - * second packet processing pass + no decipher + microcontroller - * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP: - * packet processing + no decipher + no uCP + HPS REP DMA parser - * @IPA_SEQ_INVALID: invalid sequencer type + * enum ipa_seq_type - HPS and DPS sequencer type + * @IPA_SEQ_DMA: Perform DMA only + * @IPA_SEQ_1_PASS: One pass through the pipeline + * @IPA_SEQ_2_PASS_SKIP_LAST_UC: Two passes, skip the microprocessor + * @IPA_SEQ_1_PASS_SKIP_LAST_UC: One pass, skip the microprocessor + * @IPA_SEQ_2_PASS: Two passes through the pipeline + * @IPA_SEQ_3_PASS_SKIP_LAST_UC: Three passes, skip the microprocessor + * @IPA_SEQ_DECIPHER: Optional deciphering step (combined) * - * The values defined here are broken into 4-bit nibbles that are written - * into fields of the ENDP_INIT_SEQ registers. + * The low-order byte of the sequencer type register defines the number of + * passes a packet takes through the IPA pipeline. The last pass through can + * optionally skip the microprocessor. Deciphering is optional for all types; + * if enabled, an additional mask (two bits) is added to the type value. + * + * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are + * supported (or meaningful).
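For orientation only (not part of the patch): a minimal sketch of how the two byte-wide fields above might be packed into an endpoint's ENDP_INIT_SEQ register, using the field masks defined above and the sequencer type values defined just below. The helper name and its arguments are hypothetical; ipa->reg_virt is assumed to be the mapped register space used elsewhere in this series.

#include <linux/bitfield.h>

static void example_endpoint_init_seq(struct ipa *ipa, u32 endpoint_id,
				      enum ipa_seq_type type,
				      enum ipa_seq_rep_type rep_type)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint_id);
	u32 val;

	/* Low-order byte: number of pipeline passes (optionally ORed
	 * with IPA_SEQ_DECIPHER); second byte: replication sequencer.
	 */
	val = u32_encode_bits(type, SEQ_TYPE_FMASK);
	val |= u32_encode_bits(rep_type, SEQ_REP_TYPE_FMASK);

	iowrite32(val, ipa->reg_virt + offset);
}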
*/ enum ipa_seq_type { - IPA_SEQ_DMA_ONLY = 0x0000, - IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004, - IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806, - IPA_SEQ_INVALID = 0xffff, + IPA_SEQ_DMA = 0x00, + IPA_SEQ_1_PASS = 0x02, + IPA_SEQ_2_PASS_SKIP_LAST_UC = 0x04, + IPA_SEQ_1_PASS_SKIP_LAST_UC = 0x06, + IPA_SEQ_2_PASS = 0x0a, + IPA_SEQ_3_PASS_SKIP_LAST_UC = 0x0c, + /* The next value can be ORed with the above */ + IPA_SEQ_DECIPHER = 0x11, +}; + +/** + * enum ipa_seq_rep_type - replicated packet sequencer type + * @IPA_SEQ_REP_DMA_PARSER: DMA parser for replicated packets + * + * This goes in the second byte of the endpoint sequencer type register. + * + * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are + * supported (or meaningful). + */ +enum ipa_seq_rep_type { + IPA_SEQ_REP_DMA_PARSER = 0x08, }; #define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \ (0x00000840 + 0x0070 * (ep)) #define STATUS_EN_FMASK GENMASK(0, 0) #define STATUS_ENDP_FMASK GENMASK(5, 1) -/* The next field is not present for IPA v4.5 */ +/* The next field is not present for IPA v4.5+ */ #define STATUS_LOCATION_FMASK GENMASK(8, 8) -/* The next field is not present for IPA v3.5.1 */ +/* The next field is present for IPA v4.0+ */ #define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9) -/* The next register is only present for IPA versions that support hashing */ +/* The next register is not present for IPA v4.2 (which has no hashing support) */ #define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \ (0x0000085c + 0x0070 * (er)) #define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0) @@ -634,31 +658,87 @@ enum ipa_seq_type { #define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22) #define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16) -#define IPA_REG_IRQ_STTS_OFFSET \ - IPA_REG_IRQ_STTS_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_STTS_EE_N_OFFSET(ee) \ - (0x00003008 + 0x1000 * (ee)) +static inline u32 ipa_reg_irq_stts_ee_n_offset(enum ipa_version version, + u32 ee) +{ + if (version < IPA_VERSION_4_9) + return 0x00003008 + 0x1000 * ee; + + return 0x00004008 + 0x1000 * ee; +} + +static inline u32 ipa_reg_irq_stts_offset(enum ipa_version version) +{ + return ipa_reg_irq_stts_ee_n_offset(version, GSI_EE_AP); +} + +static inline u32 ipa_reg_irq_en_ee_n_offset(enum ipa_version version, u32 ee) +{ + if (version < IPA_VERSION_4_9) + return 0x0000300c + 0x1000 * ee; + + return 0x0000400c + 0x1000 * ee; +} + +static inline u32 ipa_reg_irq_en_offset(enum ipa_version version) +{ + return ipa_reg_irq_en_ee_n_offset(version, GSI_EE_AP); +} + +static inline u32 ipa_reg_irq_clr_ee_n_offset(enum ipa_version version, u32 ee) +{ + if (version < IPA_VERSION_4_9) + return 0x00003010 + 0x1000 * ee; -#define IPA_REG_IRQ_EN_OFFSET \ - IPA_REG_IRQ_EN_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_EN_EE_N_OFFSET(ee) \ - (0x0000300c + 0x1000 * (ee)) + return 0x00004010 + 0x1000 * ee; +} + +static inline u32 ipa_reg_irq_clr_offset(enum ipa_version version) +{ + return ipa_reg_irq_clr_ee_n_offset(version, GSI_EE_AP); +} -#define IPA_REG_IRQ_CLR_OFFSET \ - IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \ - (0x00003010 + 0x1000 * (ee)) /** * enum ipa_irq_id - Bit positions representing type of IPA IRQ * @IPA_IRQ_UC_0: Microcontroller event interrupt * @IPA_IRQ_UC_1: Microcontroller response interrupt * @IPA_IRQ_TX_SUSPEND: Data ready interrupt + * @IPA_IRQ_COUNT: Number of IRQ ids (must be last) * * IRQ types not described above are not currently used.
+ * + * @IPA_IRQ_BAD_SNOC_ACCESS: (Not currently used) + * @IPA_IRQ_EOT_COAL: (Not currently used) + * @IPA_IRQ_UC_2: (Not currently used) + * @IPA_IRQ_UC_3: (Not currently used) + * @IPA_IRQ_UC_IN_Q_NOT_EMPTY: (Not currently used) + * @IPA_IRQ_UC_RX_CMD_Q_NOT_FULL: (Not currently used) + * @IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY: (Not currently used) + * @IPA_IRQ_RX_ERR: (Not currently used) + * @IPA_IRQ_DEAGGR_ERR: (Not currently used) + * @IPA_IRQ_TX_ERR: (Not currently used) + * @IPA_IRQ_STEP_MODE: (Not currently used) + * @IPA_IRQ_PROC_ERR: (Not currently used) + * @IPA_IRQ_TX_HOLB_DROP: (Not currently used) + * @IPA_IRQ_BAM_GSI_IDLE: (Not currently used) + * @IPA_IRQ_PIPE_YELLOW_BELOW: (Not currently used) + * @IPA_IRQ_PIPE_RED_BELOW: (Not currently used) + * @IPA_IRQ_PIPE_YELLOW_ABOVE: (Not currently used) + * @IPA_IRQ_PIPE_RED_ABOVE: (Not currently used) + * @IPA_IRQ_UCP: (Not currently used) + * @IPA_IRQ_DCMP: (Not currently used) + * @IPA_IRQ_GSI_EE: (Not currently used) + * @IPA_IRQ_GSI_IPA_IF_TLV_RCVD: (Not currently used) + * @IPA_IRQ_GSI_UC: (Not currently used) + * @IPA_IRQ_TLV_LEN_MIN_DSM: (Not currently used) + * @IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN: (Not currently used) + * @IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN: (Not currently used) + * @IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN: (Not currently used) */ enum ipa_irq_id { IPA_IRQ_BAD_SNOC_ACCESS = 0x0, - /* Type (bit) 0x1 is not defined */ + /* The next bit is not present for IPA v3.5+ */ + IPA_IRQ_EOT_COAL = 0x1, IPA_IRQ_UC_0 = 0x2, IPA_IRQ_UC_1 = 0x3, IPA_IRQ_UC_2 = 0x4, @@ -679,38 +759,89 @@ enum ipa_irq_id { IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13, IPA_IRQ_PIPE_RED_ABOVE = 0x14, IPA_IRQ_UCP = 0x15, + /* The next bit is not present for IPA v4.5+ */ IPA_IRQ_DCMP = 0x16, IPA_IRQ_GSI_EE = 0x17, IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18, IPA_IRQ_GSI_UC = 0x19, - /* The next bit is present for IPA v4.5 */ + /* The next bit is present for IPA v4.5+ */ IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a, + /* The next three bits are present for IPA v4.9+ */ + IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN = 0x1b, + IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN = 0x1c, + IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN = 0x1d, IPA_IRQ_COUNT, /* Last; not an id */ }; -#define IPA_REG_IRQ_UC_OFFSET \ - IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \ - (0x0000301c + 0x1000 * (ee)) +static inline u32 ipa_reg_irq_uc_ee_n_offset(enum ipa_version version, u32 ee) +{ + if (version < IPA_VERSION_4_9) + return 0x0000301c + 0x1000 * ee; + + return 0x0000401c + 0x1000 * ee; +} + +static inline u32 ipa_reg_irq_uc_offset(enum ipa_version version) +{ + return ipa_reg_irq_uc_ee_n_offset(version, GSI_EE_AP); +} + #define UC_INTR_FMASK GENMASK(0, 0) /* ipa->available defines the valid bits in the SUSPEND_INFO register */ -#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \ - IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \ - (0x00003030 + 0x1000 * (ee)) - -/* ipa->available defines the valid bits in the IRQ_SUSPEND_EN register */ -#define IPA_REG_IRQ_SUSPEND_EN_OFFSET \ - IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(ee) \ - (0x00003034 + 0x1000 * (ee)) - -/* ipa->available defines the valid bits in the IRQ_SUSPEND_CLR register */ -#define IPA_REG_IRQ_SUSPEND_CLR_OFFSET \ - IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(ee) \ - (0x00003038 + 0x1000 * (ee)) +static inline u32 +ipa_reg_irq_suspend_info_ee_n_offset(enum ipa_version version, u32 ee) +{ + if 
(version == IPA_VERSION_3_0) + return 0x00003098 + 0x1000 * ee; + + if (version < IPA_VERSION_4_9) + return 0x00003030 + 0x1000 * ee; + + return 0x00004030 + 0x1000 * ee; +} + +static inline u32 +ipa_reg_irq_suspend_info_offset(enum ipa_version version) +{ + return ipa_reg_irq_suspend_info_ee_n_offset(version, GSI_EE_AP); +} + +/* ipa->available defines the valid bits in the SUSPEND_EN register */ +static inline u32 +ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee) +{ + /* assert(version != IPA_VERSION_3_0); */ + + if (version < IPA_VERSION_4_9) + return 0x00003034 + 0x1000 * ee; + + return 0x00004034 + 0x1000 * ee; +} + +static inline u32 +ipa_reg_irq_suspend_en_offset(enum ipa_version version) +{ + return ipa_reg_irq_suspend_en_ee_n_offset(version, GSI_EE_AP); +} + +/* ipa->available defines the valid bits in the SUSPEND_CLR register */ +static inline u32 +ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee) +{ + /* assert(version != IPA_VERSION_3_0); */ + + if (version < IPA_VERSION_4_9) + return 0x00003038 + 0x1000 * ee; + + return 0x00004038 + 0x1000 * ee; +} + +static inline u32 +ipa_reg_irq_suspend_clr_offset(enum ipa_version version) +{ + return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP); +} int ipa_reg_init(struct ipa *ipa); void ipa_reg_exit(struct ipa *ipa); diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c new file mode 100644 index 000000000000..3b2dc216d3a6 --- /dev/null +++ b/drivers/net/ipa/ipa_resource.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2018-2021 Linaro Ltd. + */ + +#include <linux/types.h> +#include <linux/kernel.h> + +#include "ipa.h" +#include "ipa_data.h" +#include "ipa_reg.h" +#include "ipa_resource.h" + +/** + * DOC: IPA Resources + * + * The IPA manages a set of resources internally for various purposes. + * A given IPA version has a fixed number of resource types, and a fixed + * total number of resources of each type. "Source" resource types + * are separate from "destination" resource types. + * + * Each version of IPA also has some number of resource groups. Each + * endpoint is assigned to a resource group, and all endpoints in the + * same group share pools of each type of resource. A subset of the + * total resources of each type is assigned for use by each group. + */ + +static bool ipa_resource_limits_valid(struct ipa *ipa, + const struct ipa_resource_data *data) +{ +#ifdef IPA_VALIDATION + u32 group_count; + u32 i; + u32 j; + + /* We program at most 8 source or destination resource group limits */ + BUILD_BUG_ON(IPA_RESOURCE_GROUP_MAX > 8); + + group_count = data->rsrc_group_src_count; + if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX) + return false; + + /* Return an error if a non-zero resource limit is specified + * for a resource group not supported by hardware. 
+ */ + for (i = 0; i < data->resource_src_count; i++) { + const struct ipa_resource *resource; + + resource = &data->resource_src[i]; + for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++) + if (resource->limits[j].min || resource->limits[j].max) + return false; + } + + group_count = data->rsrc_group_dst_count; + if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX) + return false; + + for (i = 0; i < data->resource_dst_count; i++) { + const struct ipa_resource *resource; + + resource = &data->resource_dst[i]; + for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++) + if (resource->limits[j].min || resource->limits[j].max) + return false; + } +#endif /* !IPA_VALIDATION */ + return true; +} + +static void +ipa_resource_config_common(struct ipa *ipa, u32 offset, + const struct ipa_resource_limits *xlimits, + const struct ipa_resource_limits *ylimits) +{ + u32 val; + + val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK); + val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK); + if (ylimits) { + val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK); + val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK); + } + + iowrite32(val, ipa->reg_virt + offset); +} + +static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type, + const struct ipa_resource_data *data) +{ + u32 group_count = data->rsrc_group_src_count; + const struct ipa_resource_limits *ylimits; + const struct ipa_resource *resource; + u32 offset; + + resource = &data->resource_src[resource_type]; + + offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 1 ? NULL : &resource->limits[1]; + ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); + + if (group_count < 3) + return; + + offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 3 ? NULL : &resource->limits[3]; + ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); + + if (group_count < 5) + return; + + offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 5 ? NULL : &resource->limits[5]; + ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); + + if (group_count < 7) + return; + + offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 7 ? NULL : &resource->limits[7]; + ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits); +} + +static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type, + const struct ipa_resource_data *data) +{ + u32 group_count = data->rsrc_group_dst_count; + const struct ipa_resource_limits *ylimits; + const struct ipa_resource *resource; + u32 offset; + + resource = &data->resource_dst[resource_type]; + + offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 1 ? NULL : &resource->limits[1]; + ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); + + if (group_count < 3) + return; + + offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 3 ? NULL : &resource->limits[3]; + ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); + + if (group_count < 5) + return; + + offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 5 ? 
NULL : &resource->limits[5]; + ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); + + if (group_count < 7) + return; + + offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type); + ylimits = group_count == 7 ? NULL : &resource->limits[7]; + ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits); +} + +/* Configure resources; there is no ipa_resource_deconfig() */ +int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data) +{ + u32 i; + + if (!ipa_resource_limits_valid(ipa, data)) + return -EINVAL; + + for (i = 0; i < data->resource_src_count; i++) + ipa_resource_config_src(ipa, i, data); + + for (i = 0; i < data->resource_dst_count; i++) + ipa_resource_config_dst(ipa, i, data); + + return 0; +} diff --git a/drivers/net/ipa/ipa_resource.h b/drivers/net/ipa/ipa_resource.h new file mode 100644 index 000000000000..ef5818bff180 --- /dev/null +++ b/drivers/net/ipa/ipa_resource.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2019-2021 Linaro Ltd. + */ +#ifndef _IPA_RESOURCE_H_ +#define _IPA_RESOURCE_H_ + +struct ipa; +struct ipa_resource_data; + +/** + * ipa_resource_config() - Configure resources + * @ipa: IPA pointer + * @data: IPA resource configuration data + * + * There is no need for a matching ipa_resource_deconfig() function. + * + * Return: 0 if successful, or a negative error code otherwise + */ +int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data); + +#endif /* _IPA_RESOURCE_H_ */ diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index bf0e4063cfd9..20319438a841 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -28,7 +28,7 @@ void ipa_smp2p_exit(struct ipa *ipa); /** * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling - * @IPA: IPA pointer + * @ipa: IPA pointer * * Prevent handling of the "setup ready" interrupt from the modem. * This is used before initiating shutdown of the driver. diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index baaab3dd0e63..3168d72f4245 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -27,28 +27,38 @@ /** * DOC: IPA Filter and Route Tables * - * The IPA has tables defined in its local shared memory that define filter - * and routing rules. Each entry in these tables contains a 64-bit DMA - * address that refers to DRAM (system memory) containing a rule definition. + * The IPA has tables defined in its local (IPA-resident) memory that define + * filter and routing rules. An entry in either of these tables is a little + * endian 64-bit "slot" that holds the address of a rule definition. (The + * size of these slots is 64 bits regardless of the host DMA address size.) + * + * Separate tables (both filter and route) are used for IPv4 and IPv6. There + * is normally another set of "hashed" filter and route tables, which are + * used with a hash of message metadata. Hashed operation is not supported + * by all IPA hardware (IPA v4.2 doesn't support hashed tables). + * + * Rules can be in local memory or in DRAM (system memory). The offset of + * an object (such as a route or filter table) in IPA-resident memory must + * be 128-byte aligned. An object in system memory (such as a route or filter + * rule) must be at an 8-byte aligned address. We currently only place + * route or filter rules in system memory. 
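As an aside (not part of the patch), a minimal sketch of what filling one table slot looks like under the rules described above: a slot is a little-endian 64-bit value regardless of the size of dma_addr_t, which is why a __le64 pointer is used rather than a DMA address type. The helper name is hypothetical.

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_table_fill_slot(__le64 *slot, dma_addr_t rule_addr)
{
	/* Rules placed in system memory must be at 8-byte aligned addresses */
	*slot = cpu_to_le64(rule_addr);
}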
+ * * A rule consists of a contiguous block of 32-bit values terminated with * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits * represents "no filtering" or "no routing," and is the reset value for - * filter or route table rules. Separate tables (both filter and route) - * used for IPv4 and IPv6. Additionally, there can be hashed filter or - * route tables, which are used when a hash of message metadata matches. - * Hashed operation is not supported by all IPA hardware. + * filter or route table rules. * * Each filter rule is associated with an AP or modem TX endpoint, though - * not all TX endpoints support filtering. The first 64-bit entry in a + * not all TX endpoints support filtering. The first 64-bit slot in a * filter table is a bitmap indicating which endpoints have entries in * the table. The low-order bit (bit 0) in this bitmap represents a * special global filter, which applies to all traffic. This is not * used in the current code. Bit 1, if set, indicates that there is an - * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the - * table. Bit 2, if set, indicates there is an entry for endpoint 1, - * and so on. Space is set aside in IPA local memory to hold as many - * filter table entries as might be required, but typically they are not - * all used. + * entry (i.e. slot containing a system address referring to a rule) for + * endpoint 0 in the table. Bit 3, if set, indicates there is an entry + * for endpoint 2, and so on. Space is set aside in IPA local memory to + * hold as many filter table entries as might be required, but typically + * they are not all used. * * The AP initializes all entries in a filter table to refer to a "zero" * entry. Once initialized the modem and AP update the entries for @@ -96,9 +106,6 @@ * ---------------------- */ -/* IPA hardware constrains filter and route tables alignment */ -#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */ - /* Assignment of route table entries to the modem and AP */ #define IPA_ROUTE_MODEM_MIN 0 #define IPA_ROUTE_MODEM_COUNT 8 @@ -118,21 +125,14 @@ /* Check things that can be validated at build time. */ static void ipa_table_validate_build(void) { - /* IPA hardware accesses memory 128 bytes at a time. Addresses - * referred to by entries in filter and route tables must be - * aligned on 128-byte byte boundaries. The only rule address - * ever use is the "zero rule", and it's aligned at the base - * of a coherent DMA allocation. - */ - BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN); - - /* Filter and route tables contain DMA addresses that refer to - * filter or route rules. We use a fixed constant to represent - * the size of either type of table entry. Code in ipa_table_init() - * uses a pointer to __le64 to initialize table entriews. + /* Filter and route tables contain DMA addresses that refer + * to filter or route rules. But the size of a table entry + * is 64 bits regardless of what the size of an AP DMA address + * is. A fixed constant defines the size of an entry, and + * code in ipa_table_init() uses a pointer to __le64 to + * initialize tables. */ - BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t)); - BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64)); + BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64)); /* A "zero rule" is used to represent no filtering or no routing. * It is a 64-bit block of zeroed memory. Code in ipa_table_init() @@ -163,7 +163,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? 
&ipa->mem[IPA_MEM_V4_ROUTE_HASHED] : &ipa->mem[IPA_MEM_V4_ROUTE]; - size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE; + size = IPA_ROUTE_COUNT_MAX * sizeof(__le64); } else { if (ipv6) mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED] @@ -171,7 +171,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED] : &ipa->mem[IPA_MEM_V4_FILTER]; - size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE; + size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); } if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) @@ -239,11 +239,6 @@ static void ipa_table_validate_build(void) #endif /* !IPA_VALIDATE */ -bool ipa_table_hash_support(struct ipa *ipa) -{ - return ipa->version != IPA_VERSION_4_2; -} - /* Zero entry count means no table, so just return a 0 address */ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) { @@ -275,8 +270,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, if (filter) first++; /* skip over bitmap */ - offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE; - size = count * IPA_TABLE_ENTRY_SIZE; + offset = mem->offset + first * sizeof(__le64); + size = count * sizeof(__le64); addr = ipa_table_addr(ipa, false, count); ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true); @@ -458,11 +453,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, count = hweight32(ipa->filter_map); hash_count = hash_mem->size ? count : 0; } else { - count = mem->size / IPA_TABLE_ENTRY_SIZE; - hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE; + count = mem->size / sizeof(__le64); + hash_count = hash_mem->size / sizeof(__le64); } - size = count * IPA_TABLE_ENTRY_SIZE; - hash_size = hash_count * IPA_TABLE_ENTRY_SIZE; + size = count * sizeof(__le64); + hash_size = hash_count * sizeof(__le64); addr = ipa_table_addr(ipa, filter, count); hash_addr = ipa_table_addr(ipa, filter, hash_count); @@ -502,11 +497,6 @@ int ipa_table_setup(struct ipa *ipa) return 0; } -void ipa_table_teardown(struct ipa *ipa) -{ - /* Nothing to do */ /* XXX Maybe reset the tables? */ -} - /** * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple * @endpoint: Endpoint whose filter hash tuple should be zeroed @@ -530,6 +520,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } +/* Configure a hashed filter table; there is no ipa_filter_deconfig() */ static void ipa_filter_config(struct ipa *ipa, bool modem) { enum gsi_ee_id ee_id = modem ? 
GSI_EE_MODEM : GSI_EE_AP; @@ -550,11 +541,6 @@ static void ipa_filter_config(struct ipa *ipa, bool modem) } } -static void ipa_filter_deconfig(struct ipa *ipa, bool modem) -{ - /* Nothing to do */ -} - static bool ipa_route_id_modem(u32 route_id) { return route_id >= IPA_ROUTE_MODEM_MIN && @@ -581,6 +567,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id) iowrite32(val, ipa->reg_virt + offset); } +/* Configure a hashed route table; there is no ipa_route_deconfig() */ static void ipa_route_config(struct ipa *ipa, bool modem) { u32 route_id; @@ -593,11 +580,7 @@ static void ipa_route_config(struct ipa *ipa, bool modem) ipa_route_tuple_zero(ipa, route_id); } -static void ipa_route_deconfig(struct ipa *ipa, bool modem) -{ - /* Nothing to do */ -} - +/* Configure filter and route tables; there is no ipa_table_deconfig() */ void ipa_table_config(struct ipa *ipa) { ipa_filter_config(ipa, false); @@ -606,14 +589,6 @@ void ipa_table_config(struct ipa *ipa) ipa_route_config(ipa, true); } -void ipa_table_deconfig(struct ipa *ipa) -{ - ipa_route_deconfig(ipa, true); - ipa_route_deconfig(ipa, false); - ipa_filter_deconfig(ipa, true); - ipa_filter_deconfig(ipa, false); -} - /* * Initialize a coherent DMA allocation containing initialized filter and * route table data. This is used when initializing or resetting the IPA @@ -663,7 +638,13 @@ int ipa_table_init(struct ipa *ipa) ipa_table_validate_build(); - size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + /* The IPA hardware requires route and filter table rules to be + * aligned on a 128-byte boundary. We put the "zero rule" at the + * base of the table area allocated here. The DMA address returned + * by dma_alloc_coherent() is guaranteed to be a power-of-2 number + * of pages, which satisfies the rule alignment requirement. 
+ */ + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -695,7 +676,7 @@ void ipa_table_exit(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; size_t size; - size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); ipa->table_addr = 0; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 1a68d20f19d6..1e2be9fce2f8 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -10,9 +10,6 @@ struct ipa; -/* The size of a filter or route table entry */ -#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */ - /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */ #define IPA_FILTER_COUNT_MAX 14 @@ -24,7 +21,7 @@ struct ipa; /** * ipa_table_valid() - Validate route and filter table memory regions * @ipa: IPA pointer - + * * Return: true if all regions are valid, false otherwise */ bool ipa_table_valid(struct ipa *ipa); @@ -32,6 +29,7 @@ bool ipa_table_valid(struct ipa *ipa); /** * ipa_filter_map_valid() - Validate a filter table endpoint bitmap * @ipa: IPA pointer + * @filter_mask: Filter table endpoint bitmap to check * * Return: true if all regions are valid, false otherwise */ @@ -55,7 +53,10 @@ static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask) * ipa_table_hash_support() - Return true if hashed tables are supported * @ipa: IPA pointer */ -bool ipa_table_hash_support(struct ipa *ipa); +static inline bool ipa_table_hash_support(struct ipa *ipa) +{ + return ipa->version != IPA_VERSION_4_2; +} /** * ipa_table_reset() - Reset filter and route tables entries to "none" @@ -73,28 +74,20 @@ int ipa_table_hash_flush(struct ipa *ipa); /** * ipa_table_setup() - Set up filter and route tables * @ipa: IPA pointer + * + * There is no need for a matching ipa_table_teardown() function. */ int ipa_table_setup(struct ipa *ipa); /** - * ipa_table_teardown() - Inverse of ipa_table_setup() - * @ipa: IPA pointer - */ -void ipa_table_teardown(struct ipa *ipa); - -/** * ipa_table_config() - Configure filter and route tables * @ipa: IPA pointer + * + * There is no need for a matching ipa_table_deconfig() function. 
*/ void ipa_table_config(struct ipa *ipa); /** - * ipa_table_deconfig() - Inverse of ipa_table_config() - * @ipa: IPA pointer - */ -void ipa_table_deconfig(struct ipa *ipa); - -/** * ipa_table_init() - Do early initialization of filter and route tables * @ipa: IPA pointer */ diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index dee58a6596d4..2756363e6938 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -192,6 +192,7 @@ void ipa_uc_teardown(struct ipa *ipa) static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param) { struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa); + u32 offset; u32 val; /* Fill in the command data */ @@ -203,8 +204,8 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param) /* Use an interrupt to tell the microcontroller the command is ready */ val = u32_encode_bits(1, UC_INTR_FMASK); - - iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET); + offset = ipa_reg_irq_uc_offset(ipa->version); + iowrite32(val, ipa->reg_virt + offset); } /* Tell the microcontroller the AP is shutting down */ diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h index 2944e2a89023..ee2b3d02f3cd 100644 --- a/drivers/net/ipa/ipa_version.h +++ b/drivers/net/ipa/ipa_version.h @@ -8,17 +8,32 @@ /** * enum ipa_version + * @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0 + * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.1 + * @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2 + * @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3 + * @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0 + * @IPA_VERSION_4_1: IPA version 4.1/GSI version 2.0 + * @IPA_VERSION_4_2: IPA version 4.2/GSI version 2.2 + * @IPA_VERSION_4_5: IPA version 4.5/GSI version 2.5 + * @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7 + * @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9 + * @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1) * * Defines the version of IPA (and GSI) hardware present on the platform. - * It seems this might be better defined elsewhere, but having it here gets - * it where it's needed. 
*/ enum ipa_version { - IPA_VERSION_3_5_1, /* GSI version 1.3.0 */ - IPA_VERSION_4_0, /* GSI version 2.0 */ - IPA_VERSION_4_1, /* GSI version 2.1 */ - IPA_VERSION_4_2, /* GSI version 2.2 */ - IPA_VERSION_4_5, /* GSI version 2.5 */ + IPA_VERSION_3_0, + IPA_VERSION_3_1, + IPA_VERSION_3_5, + IPA_VERSION_3_5_1, + IPA_VERSION_4_0, + IPA_VERSION_4_1, + IPA_VERSION_4_2, + IPA_VERSION_4_5, + IPA_VERSION_4_7, + IPA_VERSION_4_9, + IPA_VERSION_4_11, }; #endif /* _IPA_VERSION_H_ */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 9a9a5cf36a4b..1b998aa481f8 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -272,25 +272,22 @@ static void macvlan_broadcast(struct sk_buff *skb, if (skb->protocol == htons(ETH_P_PAUSE)) return; - for (i = 0; i < MACVLAN_HASH_SIZE; i++) { - hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) { - if (vlan->dev == src || !(vlan->mode & mode)) - continue; + hash_for_each_rcu(port->vlan_hash, i, vlan, hlist) { + if (vlan->dev == src || !(vlan->mode & mode)) + continue; - hash = mc_hash(vlan, eth->h_dest); - if (!test_bit(hash, vlan->mc_filter)) - continue; + hash = mc_hash(vlan, eth->h_dest); + if (!test_bit(hash, vlan->mc_filter)) + continue; - err = NET_RX_DROP; - nskb = skb_clone(skb, GFP_ATOMIC); - if (likely(nskb)) - err = macvlan_broadcast_one( - nskb, vlan, eth, + err = NET_RX_DROP; + nskb = skb_clone(skb, GFP_ATOMIC); + if (likely(nskb)) + err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE) ?: - netif_rx_ni(nskb); - macvlan_count_rx(vlan, skb->len + ETH_HLEN, - err == NET_RX_SUCCESS, true); - } + netif_rx_ni(nskb); + macvlan_count_rx(vlan, skb->len + ETH_HLEN, + err == NET_RX_SUCCESS, true); } } @@ -380,20 +377,14 @@ err: static void macvlan_flush_sources(struct macvlan_port *port, struct macvlan_dev *vlan) { + struct macvlan_source_entry *entry; + struct hlist_node *next; int i; - for (i = 0; i < MACVLAN_HASH_SIZE; i++) { - struct hlist_node *h, *n; - - hlist_for_each_safe(h, n, &port->vlan_source_hash[i]) { - struct macvlan_source_entry *entry; + hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist) + if (entry->vlan == vlan) + macvlan_hash_del_source(entry); - entry = hlist_entry(h, struct macvlan_source_entry, - hlist); - if (entry->vlan == vlan) - macvlan_hash_del_source(entry); - } - } vlan->macaddr_count = 0; } @@ -423,18 +414,24 @@ static void macvlan_forward_source_one(struct sk_buff *skb, macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); } -static void macvlan_forward_source(struct sk_buff *skb, +static bool macvlan_forward_source(struct sk_buff *skb, struct macvlan_port *port, const unsigned char *addr) { struct macvlan_source_entry *entry; u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &port->vlan_source_hash[idx]; + bool consume = false; hlist_for_each_entry_rcu(entry, h, hlist) { - if (ether_addr_equal_64bits(entry->addr, addr)) + if (ether_addr_equal_64bits(entry->addr, addr)) { + if (entry->vlan->flags & MACVLAN_FLAG_NODST) + consume = true; macvlan_forward_source_one(skb, entry->vlan); + } } + + return consume; } /* called under rcu_read_lock() from netif_receive_skb */ @@ -463,7 +460,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; *pskb = skb; eth = eth_hdr(skb); - macvlan_forward_source(skb, port, eth->h_source); + if (macvlan_forward_source(skb, port, eth->h_source)) + return RX_HANDLER_CONSUMED; src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != 
MACVLAN_MODE_BRIDGE) { @@ -482,7 +480,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; } - macvlan_forward_source(skb, port, eth->h_source); + if (macvlan_forward_source(skb, port, eth->h_source)) + return RX_HANDLER_CONSUMED; if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); @@ -1286,7 +1285,8 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return 0; if (data[IFLA_MACVLAN_FLAGS] && - nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) + nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~(MACVLAN_FLAG_NOPROMISC | + MACVLAN_FLAG_NODST)) return -EINVAL; if (data[IFLA_MACVLAN_MODE]) { diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index 5e72cc55afbd..e08c90ac0c6e 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c @@ -83,7 +83,7 @@ int mdio_set_flag(const struct mdio_if_info *mdio, EXPORT_SYMBOL(mdio_set_flag); /** - * mdio_link_ok - is link status up/OK + * mdio45_links_ok - is link status up/OK * @mdio: MDIO interface * @mmd_mask: Mask for MMDs to check * diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index a10cc460d7cf..d06e06f5e31a 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -200,6 +200,17 @@ config MDIO_BUS_MUX_MESON_G12A the amlogic g12a SoC. The multiplexers connects either the external or the internal MDIO bus to the parent bus. +config MDIO_BUS_MUX_BCM6368 + tristate "Broadcom BCM6368 MDIO bus multiplexers" + depends on OF && OF_MDIO && (BMIPS_GENERIC || COMPILE_TEST) + select MDIO_BUS_MUX + default BMIPS_GENERIC + help + This module provides a driver for MDIO bus multiplexers found in + BCM6368 based Broadcom SoCs. This multiplexer connects one of several + child MDIO bus to a parent bus. Buses could be internal as well as + external and selection logic lies inside the same multiplexer. 
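For orientation only (not part of the patch): the heart of any mdio-mux consumer such as the option above is a selection callback handed to mdio_mux_init(); the complete BCM6368 driver added later in this patch follows this same pattern. The names below are illustrative only.

#include <linux/mdio-mux.h>

struct example_mux {
	void *mux_handle;	/* returned by mdio_mux_init() */
	int selected;		/* currently selected child bus */
};

/* Called by the mdio-mux core before accesses on a given child bus */
static int example_mux_switch_fn(int current_child, int desired_child,
				 void *data)
{
	struct example_mux *priv = data;

	/* Steer the shared parent bus at the requested child bus */
	priv->selected = desired_child;

	return 0;
}

Registration is then a single call, roughly mdio_mux_init(dev, dev->of_node, example_mux_switch_fn, &priv->mux_handle, priv, parent_bus), after which the core creates one child mii_bus per selectable bus.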
+ config MDIO_BUS_MUX_BCM_IPROC tristate "Broadcom iProc based MDIO bus multiplexers" depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile index 5c498dde463f..c3ec0ef989df 100644 --- a/drivers/net/mdio/Makefile +++ b/drivers/net/mdio/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o +obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index fbd36891ee64..5d171e7f118d 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -5,20 +5,18 @@ * Copyright (C) 2014-2017 Broadcom */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> #include <linux/kernel.h> -#include <linux/phy.h> -#include <linux/platform_device.h> -#include <linux/sched.h> #include <linux/module.h> -#include <linux/io.h> -#include <linux/delay.h> -#include <linux/clk.h> - #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/of_mdio.h> - +#include <linux/of_platform.h> +#include <linux/phy.h> #include <linux/platform_data/mdio-bcm-unimac.h> +#include <linux/platform_device.h> +#include <linux/sched.h> #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index d3915f831854..07609114a26b 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -14,10 +14,10 @@ * Vitaly Bordug <vbordug@ru.mvista.com> */ -#include <linux/module.h> +#include <linux/delay.h> #include <linux/mdio-bitbang.h> +#include <linux/module.h> #include <linux/types.h> -#include <linux/delay.h> #define MDIO_READ 2 #define MDIO_WRITE 1 @@ -158,7 +158,7 @@ int mdiobb_read(struct mii_bus *bus, int phy, int reg) reg = mdiobb_cmd_addr(ctrl, phy, reg); mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); } else - mdiobb_cmd(ctrl, MDIO_READ, phy, reg); + mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg); ctrl->ops->set_mdio_dir(ctrl, 0); @@ -190,7 +190,7 @@ int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) reg = mdiobb_cmd_addr(ctrl, phy, reg); mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); } else - mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); + mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); @@ -217,6 +217,10 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) bus->read = mdiobb_read; bus->write = mdiobb_write; bus->priv = ctrl; + if (!ctrl->override_op_c22) { + ctrl->op_c22_read = MDIO_READ; + ctrl->op_c22_write = MDIO_WRITE; + } return bus; } diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c index 1afd6fc1a351..95ce274c1be1 100644 --- a/drivers/net/mdio/mdio-cavium.c +++ b/drivers/net/mdio/mdio-cavium.c @@ -4,9 +4,9 @@ */ #include <linux/delay.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/phy.h> -#include <linux/io.h> #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 1b00235d7dc5..0fb3c2de0845 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -17,15 +17,15 @@ * Vitaly Bordug <vbordug@ru.mvista.com> */ -#include <linux/module.h> -#include <linux/slab.h> 
+#include <linux/gpio/consumer.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/platform_data/mdio-gpio.h> #include <linux/mdio-bitbang.h> #include <linux/mdio-gpio.h> -#include <linux/gpio/consumer.h> +#include <linux/module.h> #include <linux/of_mdio.h> +#include <linux/platform_data/mdio-gpio.h> +#include <linux/platform_device.h> +#include <linux/slab.h> struct mdio_gpio_info { struct mdiobb_ctrl ctrl; @@ -132,6 +132,13 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; } + if (dev->of_node && + of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) { + bitbang->ctrl.op_c22_read = 0; + bitbang->ctrl.op_c22_write = 0; + bitbang->ctrl.override_op_c22 = 1; + } + dev_set_drvdata(dev, new_bus); return new_bus; @@ -196,6 +203,7 @@ static int mdio_gpio_remove(struct platform_device *pdev) static const struct of_device_id mdio_gpio_of_match[] = { { .compatible = "virtual,mdio-gpio", }, + { .compatible = "microchip,mdio-smi0" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 25c25ea6da66..9cd71d896963 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -3,10 +3,10 @@ /* Copyright (c) 2020 Sartura Ltd. */ #include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/phy.h> diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index 1bd18857e1c5..8fe8f0119fc1 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -7,12 +7,12 @@ #include <linux/delay.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/regmap.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> -#include <linux/mfd/syscon.h> +#include <linux/regmap.h> /* MII address register definitions */ #define MII_ADDR_REG_ADDR 0x10 diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 11f583fd4611..b36e5ea04ddf 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -6,14 +6,14 @@ * Copyright (c) 2017 Microsemi Corporation */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/phy.h> -#include <linux/platform_device.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 42fb5f166136..03261e6b9ceb 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -3,14 +3,14 @@ * Copyright 2016 Broadcom */ #include <linux/clk.h> -#include <linux/platform_device.h> +#include <linux/delay.h> #include <linux/device.h> -#include <linux/of_mdio.h> +#include <linux/iopoll.h> +#include <linux/mdio-mux.h> #include <linux/module.h> +#include <linux/of_mdio.h> #include <linux/phy.h> -#include <linux/mdio-mux.h> -#include <linux/delay.h> -#include <linux/iopoll.h> +#include <linux/platform_device.h> #define MDIO_RATE_ADJ_EXT_OFFSET 
0x000 #define MDIO_RATE_ADJ_INT_OFFSET 0x004 @@ -197,10 +197,8 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1; } md->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(md->base)) { - dev_err(&pdev->dev, "failed to ioremap register\n"); + if (IS_ERR(md->base)) return PTR_ERR(md->base); - } md->mii_bus = devm_mdiobus_alloc(&pdev->dev); if (!md->mii_bus) { diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c new file mode 100644 index 000000000000..6dcbf987d61b --- /dev/null +++ b/drivers/net/mdio/mdio-mux-bcm6368.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Broadcom BCM6368 mdiomux bus controller driver + * + * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mdio-mux.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/sched.h> + +#define MDIOC_REG 0x0 +#define MDIOC_EXT_MASK BIT(16) +#define MDIOC_REG_SHIFT 20 +#define MDIOC_PHYID_SHIFT 25 +#define MDIOC_RD_MASK BIT(30) +#define MDIOC_WR_MASK BIT(31) + +#define MDIOD_REG 0x4 + +struct bcm6368_mdiomux_desc { + void *mux_handle; + void __iomem *base; + struct device *dev; + struct mii_bus *mii_bus; + int ext_phy; +}; + +static int bcm6368_mdiomux_read(struct mii_bus *bus, int phy_id, int loc) +{ + struct bcm6368_mdiomux_desc *md = bus->priv; + uint32_t reg; + int ret; + + __raw_writel(0, md->base + MDIOC_REG); + + reg = MDIOC_RD_MASK | + (phy_id << MDIOC_PHYID_SHIFT) | + (loc << MDIOC_REG_SHIFT); + if (md->ext_phy) + reg |= MDIOC_EXT_MASK; + + __raw_writel(reg, md->base + MDIOC_REG); + udelay(50); + ret = __raw_readw(md->base + MDIOD_REG); + + return ret; +} + +static int bcm6368_mdiomux_write(struct mii_bus *bus, int phy_id, int loc, + uint16_t val) +{ + struct bcm6368_mdiomux_desc *md = bus->priv; + uint32_t reg; + + __raw_writel(0, md->base + MDIOC_REG); + + reg = MDIOC_WR_MASK | + (phy_id << MDIOC_PHYID_SHIFT) | + (loc << MDIOC_REG_SHIFT); + if (md->ext_phy) + reg |= MDIOC_EXT_MASK; + reg |= val; + + __raw_writel(reg, md->base + MDIOC_REG); + udelay(50); + + return 0; +} + +static int bcm6368_mdiomux_switch_fn(int current_child, int desired_child, + void *data) +{ + struct bcm6368_mdiomux_desc *md = data; + + md->ext_phy = desired_child; + + return 0; +} + +static int bcm6368_mdiomux_probe(struct platform_device *pdev) +{ + struct bcm6368_mdiomux_desc *md; + struct mii_bus *bus; + struct resource *res; + int rc; + + md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL); + if (!md) + return -ENOMEM; + md->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + /* + * Just ioremap, as this MDIO block is usually integrated into an + * Ethernet MAC controller register range + */ + md->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!md->base) { + dev_err(&pdev->dev, "failed to ioremap register\n"); + return -ENOMEM; + } + + md->mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!md->mii_bus) { + dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); + return -ENOMEM; + } + + bus = md->mii_bus; + bus->priv = md; + bus->name = "BCM6368 MDIO mux bus"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); + bus->parent = &pdev->dev; + bus->read = bcm6368_mdiomux_read; + bus->write = 
bcm6368_mdiomux_write; + bus->phy_mask = 0x3f; + bus->dev.of_node = pdev->dev.of_node; + + rc = mdiobus_register(bus); + if (rc) { + dev_err(&pdev->dev, "mdiomux registration failed\n"); + return rc; + } + + platform_set_drvdata(pdev, md); + + rc = mdio_mux_init(md->dev, md->dev->of_node, + bcm6368_mdiomux_switch_fn, &md->mux_handle, md, + md->mii_bus); + if (rc) { + dev_info(md->dev, "mdiomux initialization failed\n"); + goto out_register; + } + + dev_info(&pdev->dev, "Broadcom BCM6368 MDIO mux bus\n"); + + return 0; + +out_register: + mdiobus_unregister(bus); + return rc; +} + +static int bcm6368_mdiomux_remove(struct platform_device *pdev) +{ + struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev); + + mdio_mux_uninit(md->mux_handle); + mdiobus_unregister(md->mii_bus); + + return 0; +} + +static const struct of_device_id bcm6368_mdiomux_ids[] = { + { .compatible = "brcm,bcm6368-mdio-mux", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm6368_mdiomux_ids); + +static struct platform_driver bcm6368_mdiomux_driver = { + .driver = { + .name = "bcm6368-mdio-mux", + .of_match_table = bcm6368_mdiomux_ids, + }, + .probe = bcm6368_mdiomux_probe, + .remove = bcm6368_mdiomux_remove, +}; +module_platform_driver(bcm6368_mdiomux_driver); + +MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>"); +MODULE_DESCRIPTION("BCM6368 mdiomux bus controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 10a758fdc9e6..3c7f16f06b45 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -3,13 +3,13 @@ * Copyright (C) 2011, 2012 Cavium, Inc. */ -#include <linux/platform_device.h> #include <linux/device.h> -#include <linux/of_mdio.h> +#include <linux/gpio/consumer.h> +#include <linux/mdio-mux.h> #include <linux/module.h> +#include <linux/of_mdio.h> #include <linux/phy.h> -#include <linux/mdio-mux.h> -#include <linux/gpio/consumer.h> +#include <linux/platform_device.h> #define DRV_VERSION "1.1" #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index d1a8780e24d8..c02fb2a067ee 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -7,13 +7,13 @@ * Copyright 2012 Freescale Semiconductor, Inc. */ -#include <linux/platform_device.h> #include <linux/device.h> +#include <linux/mdio-mux.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/module.h> #include <linux/phy.h> -#include <linux/mdio-mux.h> +#include <linux/platform_device.h> struct mdio_mux_mmioreg_state { void *mux_handle; diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index d6564381aa3e..527acfc3c045 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -4,10 +4,10 @@ * Copyright 2019 NXP */ -#include <linux/platform_device.h> #include <linux/mdio-mux.h> #include <linux/module.h> #include <linux/mux/consumer.h> +#include <linux/platform_device.h> struct mdio_mux_multiplexer_state { struct mux_control *muxc; diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index 6a1d3540210b..110e4ee85785 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -3,12 +3,12 @@ * Copyright (C) 2011, 2012 Cavium, Inc. 
*/ -#include <linux/platform_device.h> -#include <linux/mdio-mux.h> -#include <linux/of_mdio.h> #include <linux/device.h> +#include <linux/mdio-mux.h> #include <linux/module.h> +#include <linux/of_mdio.h> #include <linux/phy.h> +#include <linux/platform_device.h> #define DRV_DESCRIPTION "MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index d1e1009d51af..8ce99c4888e1 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -3,13 +3,13 @@ * Copyright (C) 2009-2015 Cavium, Inc. */ -#include <linux/platform_device.h> +#include <linux/gfp.h> +#include <linux/io.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/module.h> -#include <linux/gfp.h> #include <linux/phy.h> -#include <linux/io.h> +#include <linux/platform_device.h> #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c index 3d7eda99d34e..cb1761693b69 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -3,14 +3,14 @@ * Copyright (C) 2009-2016 Cavium, Inc. */ -#include <linux/of_address.h> -#include <linux/of_mdio.h> -#include <linux/module.h> +#include <linux/acpi.h> #include <linux/gfp.h> -#include <linux/phy.h> #include <linux/io.h> -#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_mdio.h> #include <linux/pci.h> +#include <linux/phy.h> #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 461207cdf5d6..7ab4e26db08c 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -13,11 +13,11 @@ #include <linux/io.h> #include <linux/mdio/mdio-xgene.h> #include <linux/module.h> -#include <linux/of_platform.h> -#include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/prefetch.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> #include <linux/phy.h> +#include <linux/prefetch.h> #include <net/ip.h> static bool xgene_mdio_status; diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index ea9d5855fb52..094494a68ddf 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -8,17 +8,17 @@ * out of the OpenFirmware device tree and using it to populate an mii_bus. 
*/ -#include <linux/kernel.h> #include <linux/device.h> -#include <linux/netdevice.h> #include <linux/err.h> -#include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/module.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ diff --git a/drivers/net/mhi/mhi.h b/drivers/net/mhi/mhi.h index 12e7407d712a..1d0c499d27a3 100644 --- a/drivers/net/mhi/mhi.h +++ b/drivers/net/mhi/mhi.h @@ -29,6 +29,7 @@ struct mhi_net_dev { struct mhi_net_stats stats; u32 rx_queue_sz; int msg_enable; + unsigned int mru; }; struct mhi_net_proto { diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c index f59960876083..0d8293a47a56 100644 --- a/drivers/net/mhi/net.c +++ b/drivers/net/mhi/net.c @@ -265,10 +265,12 @@ static void mhi_net_rx_refill_work(struct work_struct *work) rx_refill.work); struct net_device *ndev = mhi_netdev->ndev; struct mhi_device *mdev = mhi_netdev->mdev; - int size = READ_ONCE(ndev->mtu); struct sk_buff *skb; + unsigned int size; int err; + size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu); + while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { skb = netdev_alloc_skb(ndev, size); if (unlikely(!skb)) @@ -359,8 +361,7 @@ static void mhi_net_remove(struct mhi_device *mhi_dev) mhi_unprepare_from_transfer(mhi_netdev->mdev); - if (mhi_netdev->skbagg_head) - kfree_skb(mhi_netdev->skbagg_head); + kfree_skb(mhi_netdev->skbagg_head); free_netdev(mhi_netdev->ndev); } diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c index 75b5484c40d5..fc72b3f6ec9e 100644 --- a/drivers/net/mhi/proto_mbim.c +++ b/drivers/net/mhi/proto_mbim.c @@ -26,6 +26,15 @@ #define MBIM_NDP16_SIGN_MASK 0x00ffffff +/* Usual WWAN MTU */ +#define MHI_MBIM_DEFAULT_MTU 1500 + +/* 3500 allows to optimize skb allocation, the skbs will basically fit in + * one 4K page. Large MBIM packets will simply be split over several MHI + * transfers and chained by the MHI net layer (zerocopy). 
+ */ +#define MHI_MBIM_DEFAULT_MRU 3500 + struct mbim_context { u16 rx_seq; u16 tx_seq; @@ -91,20 +100,11 @@ static int mbim_rx_verify_nth16(struct sk_buff *skb) return le16_to_cpu(nth16->wNdpIndex); } -static int mbim_rx_verify_ndp16(struct sk_buff *skb, int ndpoffset) +static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16) { struct mhi_net_dev *dev = netdev_priv(skb->dev); - struct usb_cdc_ncm_ndp16 *ndp16; int ret; - if (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16) > skb->len) { - netif_dbg(dev, rx_err, dev->ndev, "invalid NDP offset <%u>\n", - ndpoffset); - return -EINVAL; - } - - ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); - if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { netif_dbg(dev, rx_err, dev->ndev, "invalid DPT16 length <%u>\n", le16_to_cpu(ndp16->wLength)); @@ -130,9 +130,6 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) struct net_device *ndev = mhi_netdev->ndev; int ndpoffset; - if (skb_linearize(skb)) - goto error; - /* Check NTB header and retrieve first NDP offset */ ndpoffset = mbim_rx_verify_nth16(skb); if (ndpoffset < 0) { @@ -142,12 +139,19 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) /* Process each NDP */ while (1) { - struct usb_cdc_ncm_ndp16 *ndp16; - struct usb_cdc_ncm_dpe16 *dpe16; - int nframes, n; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16; + int nframes, n, dpeoffset; + + if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) { + net_err_ratelimited("%s: Incorrect NDP offset (%u)\n", + ndev->name, ndpoffset); + __mbim_length_errors_inc(mhi_netdev); + goto error; + } /* Check NDP header and retrieve number of datagrams */ - nframes = mbim_rx_verify_ndp16(skb, ndpoffset); + nframes = mbim_rx_verify_ndp16(skb, &ndp16); if (nframes < 0) { net_err_ratelimited("%s: Incorrect NDP16\n", ndev->name); __mbim_length_errors_inc(mhi_netdev); @@ -155,8 +159,7 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) } /* Only IP data type supported, no DSS in MHI context */ - ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); - if ((ndp16->dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK)) + if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK)) != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) { net_err_ratelimited("%s: Unsupported NDP type\n", ndev->name); __mbim_errors_inc(mhi_netdev); @@ -164,19 +167,24 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) } /* Only primary IP session 0 (0x00) supported for now */ - if (ndp16->dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) { + if (ndp16.dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) { net_err_ratelimited("%s: bad packet session\n", ndev->name); __mbim_errors_inc(mhi_netdev); goto next_ndp; } /* de-aggregate and deliver IP packets */ - dpe16 = ndp16->dpe16; - for (n = 0; n < nframes; n++, dpe16++) { - u16 dgram_offset = le16_to_cpu(dpe16->wDatagramIndex); - u16 dgram_len = le16_to_cpu(dpe16->wDatagramLength); + dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16); + for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) { + u16 dgram_offset, dgram_len; struct sk_buff *skbn; + if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16))) + break; + + dgram_offset = le16_to_cpu(dpe16.wDatagramIndex); + dgram_len = le16_to_cpu(dpe16.wDatagramLength); + if (!dgram_offset || !dgram_len) break; /* null terminator */ @@ -185,7 +193,7 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) continue; 
skb_put(skbn, dgram_len); - memcpy(skbn->data, skb->data + dgram_offset, dgram_len); + skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len); switch (skbn->data[0] & 0xf0) { case 0x40: @@ -206,7 +214,7 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) } next_ndp: /* Other NDP to process? */ - ndpoffset = (int)le16_to_cpu(ndp16->wNextNdpIndex); + ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex); if (!ndpoffset) break; } @@ -282,6 +290,8 @@ static int mbim_init(struct mhi_net_dev *mhi_netdev) return -ENOMEM; ndev->needed_headroom = sizeof(struct mbim_tx_hdr); + ndev->mtu = MHI_MBIM_DEFAULT_MTU; + mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU; return 0; } diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index ade086eed955..a1cbfa44a1e1 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -13,3 +13,7 @@ endif ifneq ($(CONFIG_XFRM_OFFLOAD),) netdevsim-objs += ipsec.o endif + +ifneq ($(CONFIG_PSAMPLE),) +netdevsim-objs += psample.o +endif diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index dbeb29fa16e8..6189a4c0d39e 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -1032,10 +1032,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev, if (err) goto err_fib_destroy; - err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + err = nsim_dev_psample_init(nsim_dev); if (err) goto err_health_exit; + err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + if (err) + goto err_psample_exit; + nsim_dev->take_snapshot = debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, @@ -1043,6 +1047,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev, &nsim_dev_take_snapshot_fops); return 0; +err_psample_exit: + nsim_dev_psample_exit(nsim_dev); err_health_exit: nsim_dev_health_exit(nsim_dev); err_fib_destroy: @@ -1118,14 +1124,20 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) if (err) goto err_health_exit; - err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + err = nsim_dev_psample_init(nsim_dev); if (err) goto err_bpf_dev_exit; + err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + if (err) + goto err_psample_exit; + devlink_params_publish(devlink); devlink_reload_enable(devlink); return 0; +err_psample_exit: + nsim_dev_psample_exit(nsim_dev); err_bpf_dev_exit: nsim_bpf_dev_exit(nsim_dev); err_health_exit: @@ -1158,6 +1170,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev) return; debugfs_remove(nsim_dev->take_snapshot); nsim_dev_port_del_all(nsim_dev); + nsim_dev_psample_exit(nsim_dev); nsim_dev_health_exit(nsim_dev); nsim_fib_destroy(devlink, nsim_dev->fib_data); nsim_dev_traps_exit(devlink); diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 166f0d6cbcf7..c9ae52595a8f 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -77,6 +77,34 @@ static int nsim_set_ringparam(struct net_device *dev, return 0; } +static int +nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (ns->ethtool.get_err) + return -ns->ethtool.get_err; + memcpy(fecparam, &ns->ethtool.fec, sizeof(ns->ethtool.fec)); + return 0; +} + +static int +nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) +{ + struct netdevsim *ns = netdev_priv(dev); + u32 fec; + + if (ns->ethtool.set_err) + return -ns->ethtool.set_err; + memcpy(&ns->ethtool.fec, fecparam, 
sizeof(ns->ethtool.fec)); + fec = fecparam->fec; + if (fec == ETHTOOL_FEC_AUTO) + fec |= ETHTOOL_FEC_OFF; + fec |= ETHTOOL_FEC_NONE; + ns->ethtool.fec.active_fec = 1 << (fls(fec) - 1); + return 0; +} + static const struct ethtool_ops nsim_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS, .get_pause_stats = nsim_get_pause_stats, @@ -86,6 +114,8 @@ static const struct ethtool_ops nsim_ethtool_ops = { .get_coalesce = nsim_get_coalesce, .get_ringparam = nsim_get_ringparam, .set_ringparam = nsim_set_ringparam, + .get_fecparam = nsim_get_fecparam, + .set_fecparam = nsim_set_fecparam, }; static void nsim_ethtool_ring_init(struct netdevsim *ns) @@ -104,8 +134,14 @@ void nsim_ethtool_init(struct netdevsim *ns) nsim_ethtool_ring_init(ns); + ns->ethtool.fec.fec = ETHTOOL_FEC_NONE; + ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE; + ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir); + debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err); + debugfs_create_u32("set_err", 0600, ethtool, &ns->ethtool.set_err); + dir = debugfs_create_dir("pause", ethtool); debugfs_create_bool("report_stats_rx", 0600, dir, &ns->ethtool.pauseparam.report_stats_rx); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 46fb414f7ca6..213d3e5056c8 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -14,6 +14,7 @@ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. */ +#include <linux/bitmap.h> #include <linux/in6.h> #include <linux/kernel.h> #include <linux/list.h> @@ -47,15 +48,18 @@ struct nsim_fib_data { struct nsim_fib_entry nexthops; struct rhashtable fib_rt_ht; struct list_head fib_rt_list; - struct mutex fib_lock; /* Protects hashtable and list */ + struct mutex fib_lock; /* Protects FIB HT and list */ struct notifier_block nexthop_nb; struct rhashtable nexthop_ht; struct devlink *devlink; struct work_struct fib_event_work; struct list_head fib_event_queue; spinlock_t fib_event_queue_lock; /* Protects fib event queue list */ + struct mutex nh_lock; /* Protects NH HT */ struct dentry *ddir; bool fail_route_offload; + bool fail_res_nexthop_group_replace; + bool fail_nexthop_bucket_replace; }; struct nsim_fib_rt_key { @@ -116,6 +120,7 @@ struct nsim_nexthop { struct rhash_head ht_node; u64 occ; u32 id; + bool is_resilient; }; static const struct rhashtable_params nsim_nexthop_ht_params = { @@ -561,7 +566,7 @@ nsim_fib6_rt_create(struct nsim_fib_data *data, err_fib6_rt_nh_del: for (i--; i >= 0; i--) { nsim_fib6_rt_nh_del(fib6_rt, rt_arr[i]); - }; + } nsim_fib_rt_fini(&fib6_rt->common); kfree(fib6_rt); return ERR_PTR(err); @@ -869,10 +874,8 @@ err_rt_offload_failed_flag_set: return err; } -static int nsim_fib_event(struct nsim_fib_event *fib_event) +static void nsim_fib_event(struct nsim_fib_event *fib_event) { - int err = 0; - switch (fib_event->family) { case AF_INET: nsim_fib4_event(fib_event->data, &fib_event->fen_info, @@ -885,8 +888,6 @@ static int nsim_fib_event(struct nsim_fib_event *fib_event) nsim_fib6_event_fini(&fib_event->fib6_event); break; } - - return err; } static int nsim_fib4_prepare_event(struct fib_notifier_info *info, @@ -1118,6 +1119,10 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data, for (i = 0; i < info->nh_grp->num_nh; i++) occ += info->nh_grp->nh_entries[i].weight; break; + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + occ = info->nh_res_table->num_nh_buckets; + nexthop->is_resilient = true; + break; default: NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop 
type"); kfree(nexthop); @@ -1160,6 +1165,21 @@ err_num_decrease: } +static void nsim_nexthop_hw_flags_set(struct net *net, + const struct nsim_nexthop *nexthop, + bool trap) +{ + int i; + + nexthop_set_hw_flags(net, nexthop->id, false, trap); + + if (!nexthop->is_resilient) + return; + + for (i = 0; i < nexthop->occ; i++) + nexthop_bucket_set_hw_flags(net, nexthop->id, i, false, trap); +} + static int nsim_nexthop_add(struct nsim_fib_data *data, struct nsim_nexthop *nexthop, struct netlink_ext_ack *extack) @@ -1178,7 +1198,7 @@ static int nsim_nexthop_add(struct nsim_fib_data *data, goto err_nexthop_dismiss; } - nexthop_set_hw_flags(net, nexthop->id, false, true); + nsim_nexthop_hw_flags_set(net, nexthop, true); return 0; @@ -1207,7 +1227,7 @@ static int nsim_nexthop_replace(struct nsim_fib_data *data, goto err_nexthop_dismiss; } - nexthop_set_hw_flags(net, nexthop->id, false, true); + nsim_nexthop_hw_flags_set(net, nexthop, true); nsim_nexthop_account(data, nexthop_old->occ, false, extack); nsim_nexthop_destroy(nexthop_old); @@ -1258,6 +1278,32 @@ static void nsim_nexthop_remove(struct nsim_fib_data *data, nsim_nexthop_destroy(nexthop); } +static int nsim_nexthop_res_table_pre_replace(struct nsim_fib_data *data, + struct nh_notifier_info *info) +{ + if (data->fail_res_nexthop_group_replace) { + NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace a resilient nexthop group"); + return -EINVAL; + } + + return 0; +} + +static int nsim_nexthop_bucket_replace(struct nsim_fib_data *data, + struct nh_notifier_info *info) +{ + if (data->fail_nexthop_bucket_replace) { + NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace nexthop bucket"); + return -EINVAL; + } + + nexthop_bucket_set_hw_flags(info->net, info->id, + info->nh_res_bucket->bucket_index, + false, true); + + return 0; +} + static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -1266,8 +1312,7 @@ static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, struct nh_notifier_info *info = ptr; int err = 0; - ASSERT_RTNL(); - + mutex_lock(&data->nh_lock); switch (event) { case NEXTHOP_EVENT_REPLACE: err = nsim_nexthop_insert(data, info); @@ -1275,10 +1320,17 @@ static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, case NEXTHOP_EVENT_DEL: nsim_nexthop_remove(data, info); break; + case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE: + err = nsim_nexthop_res_table_pre_replace(data, info); + break; + case NEXTHOP_EVENT_BUCKET_REPLACE: + err = nsim_nexthop_bucket_replace(data, info); + break; default: break; } + mutex_unlock(&data->nh_lock); return notifier_from_errno(err); } @@ -1289,11 +1341,68 @@ static void nsim_nexthop_free(void *ptr, void *arg) struct net *net; net = devlink_net(data->devlink); - nexthop_set_hw_flags(net, nexthop->id, false, false); + nsim_nexthop_hw_flags_set(net, nexthop, false); nsim_nexthop_account(data, nexthop->occ, false, NULL); nsim_nexthop_destroy(nexthop); } +static ssize_t nsim_nexthop_bucket_activity_write(struct file *file, + const char __user *user_buf, + size_t size, loff_t *ppos) +{ + struct nsim_fib_data *data = file->private_data; + struct net *net = devlink_net(data->devlink); + struct nsim_nexthop *nexthop; + unsigned long *activity; + loff_t pos = *ppos; + u16 bucket_index; + char buf[128]; + int err = 0; + u32 nhid; + + if (pos != 0) + return -EINVAL; + if (size > sizeof(buf)) + return -EINVAL; + if (copy_from_user(buf, user_buf, size)) + return -EFAULT; + if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2) + return 
-EINVAL; + + rtnl_lock(); + + nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &nhid, + nsim_nexthop_ht_params); + if (!nexthop || !nexthop->is_resilient || + bucket_index >= nexthop->occ) { + err = -EINVAL; + goto out; + } + + activity = bitmap_zalloc(nexthop->occ, GFP_KERNEL); + if (!activity) { + err = -ENOMEM; + goto out; + } + + bitmap_set(activity, bucket_index, 1); + nexthop_res_grp_activity_update(net, nhid, nexthop->occ, activity); + bitmap_free(activity); + +out: + rtnl_unlock(); + + *ppos = size; + return err ?: size; +} + +static const struct file_operations nsim_nexthop_bucket_activity_fops = { + .open = simple_open, + .write = nsim_nexthop_bucket_activity_write, + .llseek = no_llseek, + .owner = THIS_MODULE, +}; + static u64 nsim_fib_ipv4_resource_occ_get(void *priv) { struct nsim_fib_data *data = priv; @@ -1383,6 +1492,17 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev) data->fail_route_offload = false; debugfs_create_bool("fail_route_offload", 0600, data->ddir, &data->fail_route_offload); + + data->fail_res_nexthop_group_replace = false; + debugfs_create_bool("fail_res_nexthop_group_replace", 0600, data->ddir, + &data->fail_res_nexthop_group_replace); + + data->fail_nexthop_bucket_replace = false; + debugfs_create_bool("fail_nexthop_bucket_replace", 0600, data->ddir, + &data->fail_nexthop_bucket_replace); + + debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir, + data, &nsim_nexthop_bucket_activity_fops); return 0; } @@ -1408,6 +1528,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, if (err) goto err_data_free; + mutex_init(&data->nh_lock); err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params); if (err) goto err_debugfs_exit; @@ -1473,6 +1594,7 @@ err_rhashtable_nexthop_destroy: data); mutex_destroy(&data->fib_lock); err_debugfs_exit: + mutex_destroy(&data->nh_lock); nsim_fib_debugfs_exit(data); err_data_free: kfree(data); @@ -1501,6 +1623,7 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data) WARN_ON_ONCE(!list_empty(&data->fib_event_queue)); WARN_ON_ONCE(!list_empty(&data->fib_rt_list)); mutex_destroy(&data->fib_lock); + mutex_destroy(&data->nh_lock); nsim_fib_debugfs_exit(data); kfree(data); } diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index 21e2974660e7..04aebdf85747 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -235,15 +235,10 @@ static ssize_t nsim_dev_health_break_write(struct file *file, char *break_msg; int err; - break_msg = kmalloc(count + 1, GFP_KERNEL); - if (!break_msg) - return -ENOMEM; + break_msg = memdup_user_nul(data, count); + if (IS_ERR(break_msg)) + return PTR_ERR(break_msg); - if (copy_from_user(break_msg, data, count)) { - err = -EFAULT; - goto out; - } - break_msg[count] = '\0'; if (break_msg[count - 1] == '\n') break_msg[count - 1] = '\0'; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 48163c5f2ec9..7ff24e03577b 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -60,9 +60,12 @@ struct nsim_ethtool_pauseparam { }; struct nsim_ethtool { + u32 get_err; + u32 set_err; struct nsim_ethtool_pauseparam pauseparam; struct ethtool_coalesce coalesce; struct ethtool_ringparam ring; + struct ethtool_fecparam fec; }; struct netdevsim { @@ -180,6 +183,20 @@ struct nsim_dev_health { int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink); void nsim_dev_health_exit(struct nsim_dev *nsim_dev); 
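The netdevsim debugfs knobs added above (fail_res_nexthop_group_replace, fail_nexthop_bucket_replace and the write-only nexthop_bucket_activity file, which parses "<nhid> <bucket_index>") are intended to be driven from user space by selftests. A minimal user-space sketch is shown below; the debugfs mount point, the netdevsim instance name and the helper itself are assumptions for illustration, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical paths: debugfs mounted at /sys/kernel/debug and a netdevsim
 * instance named "netdevsim1"; adjust to the actual test setup.
 */
#define NSIM_FIB_DIR "/sys/kernel/debug/netdevsim/netdevsim1/fib"

static int nsim_fib_write(const char *file, const char *val)
{
	char path[256];
	int fd, err;

	snprintf(path, sizeof(path), "%s/%s", NSIM_FIB_DIR, file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	err = write(fd, val, strlen(val)) < 0 ? -1 : 0;
	close(fd);
	return err;
}

int main(void)
{
	/* Ask the driver to veto the next resilient nexthop group replace. */
	if (nsim_fib_write("fail_res_nexthop_group_replace", "1"))
		return 1;

	/* Report activity on bucket 3 of nexthop group 10; the kernel side
	 * scans "%u %hu" and forwards the bitmap via
	 * nexthop_res_grp_activity_update().
	 */
	return nsim_fib_write("nexthop_bucket_activity", "10 3") ? 1 : 0;
}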
+#if IS_ENABLED(CONFIG_PSAMPLE) +int nsim_dev_psample_init(struct nsim_dev *nsim_dev); +void nsim_dev_psample_exit(struct nsim_dev *nsim_dev); +#else +static inline int nsim_dev_psample_init(struct nsim_dev *nsim_dev) +{ + return 0; +} + +static inline void nsim_dev_psample_exit(struct nsim_dev *nsim_dev) +{ +} +#endif + struct nsim_dev_port { struct list_head list; struct devlink_port devlink_port; @@ -229,6 +246,7 @@ struct nsim_dev { bool static_iana_vxlan; u32 sleep; } udp_ports; + struct nsim_dev_psample *psample; }; static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev) diff --git a/drivers/net/netdevsim/psample.c b/drivers/net/netdevsim/psample.c new file mode 100644 index 000000000000..f0c6477dd0ae --- /dev/null +++ b/drivers/net/netdevsim/psample.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Mellanox Technologies. All rights reserved */ + +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/etherdevice.h> +#include <linux/inet.h> +#include <linux/kernel.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <net/devlink.h> +#include <net/ip.h> +#include <net/psample.h> +#include <uapi/linux/ip.h> +#include <uapi/linux/udp.h> + +#include "netdevsim.h" + +#define NSIM_PSAMPLE_REPORT_INTERVAL_MS 100 +#define NSIM_PSAMPLE_INVALID_TC 0xFFFF +#define NSIM_PSAMPLE_L4_DATA_LEN 100 + +struct nsim_dev_psample { + struct delayed_work psample_dw; + struct dentry *ddir; + struct psample_group *group; + u32 rate; + u32 group_num; + u32 trunc_size; + int in_ifindex; + int out_ifindex; + u16 out_tc; + u64 out_tc_occ_max; + u64 latency_max; + bool is_active; +}; + +static struct sk_buff *nsim_dev_psample_skb_build(void) +{ + int tot_len, data_len = NSIM_PSAMPLE_L4_DATA_LEN; + struct sk_buff *skb; + struct udphdr *udph; + struct ethhdr *eth; + struct iphdr *iph; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return NULL; + tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len; + + skb_reset_mac_header(skb); + eth = skb_put(skb, sizeof(struct ethhdr)); + eth_random_addr(eth->h_dest); + eth_random_addr(eth->h_source); + eth->h_proto = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IP); + + skb_set_network_header(skb, skb->len); + iph = skb_put(skb, sizeof(struct iphdr)); + iph->protocol = IPPROTO_UDP; + iph->saddr = in_aton("192.0.2.1"); + iph->daddr = in_aton("198.51.100.1"); + iph->version = 0x4; + iph->frag_off = 0; + iph->ihl = 0x5; + iph->tot_len = htons(tot_len); + iph->id = 0; + iph->ttl = 100; + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + + skb_set_transport_header(skb, skb->len); + udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len); + get_random_bytes(&udph->source, sizeof(u16)); + get_random_bytes(&udph->dest, sizeof(u16)); + udph->len = htons(sizeof(struct udphdr) + data_len); + + return skb; +} + +static void nsim_dev_psample_md_prepare(const struct nsim_dev_psample *psample, + struct psample_metadata *md, + unsigned int len) +{ + md->trunc_size = psample->trunc_size ? 
psample->trunc_size : len; + md->in_ifindex = psample->in_ifindex; + md->out_ifindex = psample->out_ifindex; + + if (psample->out_tc != NSIM_PSAMPLE_INVALID_TC) { + md->out_tc = psample->out_tc; + md->out_tc_valid = 1; + } + + if (psample->out_tc_occ_max) { + u64 out_tc_occ; + + get_random_bytes(&out_tc_occ, sizeof(u64)); + md->out_tc_occ = out_tc_occ & (psample->out_tc_occ_max - 1); + md->out_tc_occ_valid = 1; + } + + if (psample->latency_max) { + u64 latency; + + get_random_bytes(&latency, sizeof(u64)); + md->latency = latency & (psample->latency_max - 1); + md->latency_valid = 1; + } +} + +static void nsim_dev_psample_report_work(struct work_struct *work) +{ + struct nsim_dev_psample *psample; + struct psample_metadata md = {}; + struct sk_buff *skb; + unsigned long delay; + + psample = container_of(work, struct nsim_dev_psample, psample_dw.work); + + skb = nsim_dev_psample_skb_build(); + if (!skb) + goto out; + + nsim_dev_psample_md_prepare(psample, &md, skb->len); + psample_sample_packet(psample->group, skb, psample->rate, &md); + consume_skb(skb); + +out: + delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS); + schedule_delayed_work(&psample->psample_dw, delay); +} + +static int nsim_dev_psample_enable(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_psample *psample = nsim_dev->psample; + struct devlink *devlink; + unsigned long delay; + + if (psample->is_active) + return -EBUSY; + + devlink = priv_to_devlink(nsim_dev); + psample->group = psample_group_get(devlink_net(devlink), + psample->group_num); + if (!psample->group) + return -EINVAL; + + delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS); + schedule_delayed_work(&psample->psample_dw, delay); + + psample->is_active = true; + + return 0; +} + +static int nsim_dev_psample_disable(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_psample *psample = nsim_dev->psample; + + if (!psample->is_active) + return -EINVAL; + + psample->is_active = false; + + cancel_delayed_work_sync(&psample->psample_dw); + psample_group_put(psample->group); + + return 0; +} + +static ssize_t nsim_dev_psample_enable_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct nsim_dev *nsim_dev = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(data, count, &enable); + if (err) + return err; + + if (enable) + err = nsim_dev_psample_enable(nsim_dev); + else + err = nsim_dev_psample_disable(nsim_dev); + + return err ? err : count; +} + +static const struct file_operations nsim_psample_enable_fops = { + .open = simple_open, + .write = nsim_dev_psample_enable_write, + .llseek = generic_file_llseek, + .owner = THIS_MODULE, +}; + +int nsim_dev_psample_init(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_psample *psample; + int err; + + psample = kzalloc(sizeof(*psample), GFP_KERNEL); + if (!psample) + return -ENOMEM; + nsim_dev->psample = psample; + + INIT_DELAYED_WORK(&psample->psample_dw, nsim_dev_psample_report_work); + + psample->ddir = debugfs_create_dir("psample", nsim_dev->ddir); + if (IS_ERR(psample->ddir)) { + err = PTR_ERR(psample->ddir); + goto err_psample_free; + } + + /* Populate sampling parameters with sane defaults. 
*/ + psample->rate = 100; + debugfs_create_u32("rate", 0600, psample->ddir, &psample->rate); + + psample->group_num = 10; + debugfs_create_u32("group_num", 0600, psample->ddir, + &psample->group_num); + + psample->trunc_size = 0; + debugfs_create_u32("trunc_size", 0600, psample->ddir, + &psample->trunc_size); + + psample->in_ifindex = 1; + debugfs_create_u32("in_ifindex", 0600, psample->ddir, + &psample->in_ifindex); + + psample->out_ifindex = 2; + debugfs_create_u32("out_ifindex", 0600, psample->ddir, + &psample->out_ifindex); + + psample->out_tc = 0; + debugfs_create_u16("out_tc", 0600, psample->ddir, &psample->out_tc); + + psample->out_tc_occ_max = 10000; + debugfs_create_u64("out_tc_occ_max", 0600, psample->ddir, + &psample->out_tc_occ_max); + + psample->latency_max = 50; + debugfs_create_u64("latency_max", 0600, psample->ddir, + &psample->latency_max); + + debugfs_create_file("enable", 0200, psample->ddir, nsim_dev, + &nsim_psample_enable_fops); + + return 0; + +err_psample_free: + kfree(nsim_dev->psample); + return err; +} + +void nsim_dev_psample_exit(struct nsim_dev *nsim_dev) +{ + debugfs_remove_recursive(nsim_dev->psample->ddir); + if (nsim_dev->psample->is_active) { + cancel_delayed_work_sync(&nsim_dev->psample->psample_dw); + psample_group_put(nsim_dev->psample->group); + } + kfree(nsim_dev->psample); +} diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 1aa9903d602e..944ba105cac1 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -15,6 +15,7 @@ #define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0 #define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0 #define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0 +#define SYNOPSYS_XPCS_SGMII_ID 0x7996ced0 #define SYNOPSYS_XPCS_MASK 0xffffffff /* Vendor regs access */ @@ -57,6 +58,34 @@ #define DW_C73_2500KX BIT(0) #define DW_C73_5000KR BIT(1) +/* Clause 37 Defines */ +/* VR MII MMD registers offsets */ +#define DW_VR_MII_DIG_CTRL1 0x8000 +#define DW_VR_MII_AN_CTRL 0x8001 +#define DW_VR_MII_AN_INTR_STS 0x8002 + +/* VR_MII_DIG_CTRL1 */ +#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) + +/* VR_MII_AN_CTRL */ +#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 +#define DW_VR_MII_TX_CONFIG_MASK BIT(3) +#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 +#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0 +#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1 +#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) +#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 +#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 + +/* VR_MII_AN_INTR_STS */ +#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) +#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 +#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) +#define DW_VR_MII_C37_ANSGM_SP_10 0x0 +#define DW_VR_MII_C37_ANSGM_SP_100 0x1 +#define DW_VR_MII_C37_ANSGM_SP_1000 0x2 +#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4) + static const int xpcs_usxgmii_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, @@ -105,6 +134,16 @@ static const int xpcs_xlgmii_features[] = { __ETHTOOL_LINK_MODE_MASK_NBITS, }; +static const int xpcs_sgmii_features[] = { + ETHTOOL_LINK_MODE_10baseT_Half_BIT, + ETHTOOL_LINK_MODE_10baseT_Full_BIT, + ETHTOOL_LINK_MODE_100baseT_Half_BIT, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + __ETHTOOL_LINK_MODE_MASK_NBITS, +}; + static const phy_interface_t xpcs_usxgmii_interfaces[] = { PHY_INTERFACE_MODE_USXGMII, PHY_INTERFACE_MODE_MAX, @@ -120,27 +159,42 @@ static const phy_interface_t xpcs_xlgmii_interfaces[] = { PHY_INTERFACE_MODE_MAX, 
}; +static const phy_interface_t xpcs_sgmii_interfaces[] = { + PHY_INTERFACE_MODE_SGMII, + PHY_INTERFACE_MODE_MAX, +}; + static struct xpcs_id { u32 id; u32 mask; const int *supported; const phy_interface_t *interface; + int an_mode; } xpcs_id_list[] = { { .id = SYNOPSYS_XPCS_USXGMII_ID, .mask = SYNOPSYS_XPCS_MASK, .supported = xpcs_usxgmii_features, .interface = xpcs_usxgmii_interfaces, + .an_mode = DW_AN_C73, }, { .id = SYNOPSYS_XPCS_10GKR_ID, .mask = SYNOPSYS_XPCS_MASK, .supported = xpcs_10gkr_features, .interface = xpcs_10gkr_interfaces, + .an_mode = DW_AN_C73, }, { .id = SYNOPSYS_XPCS_XLGMII_ID, .mask = SYNOPSYS_XPCS_MASK, .supported = xpcs_xlgmii_features, .interface = xpcs_xlgmii_interfaces, + .an_mode = DW_AN_C73, + }, { + .id = SYNOPSYS_XPCS_SGMII_ID, + .mask = SYNOPSYS_XPCS_MASK, + .supported = xpcs_sgmii_features, + .interface = xpcs_sgmii_interfaces, + .an_mode = DW_AN_C37_SGMII, }, }; @@ -195,9 +249,20 @@ static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev) return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0; } -static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev) +static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs) { - int ret; + int ret, dev; + + switch (xpcs->an_mode) { + case DW_AN_C73: + dev = MDIO_MMD_PCS; + break; + case DW_AN_C37_SGMII: + dev = MDIO_MMD_VEND2; + break; + default: + return -1; + } ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET); if (ret < 0) @@ -212,8 +277,8 @@ static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev) dev_warn(&(__xpcs)->bus->dev, ##__args); \ }) -static int xpcs_read_fault(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; @@ -263,7 +328,7 @@ static int xpcs_read_fault(struct mdio_xpcs_args *xpcs, return 0; } -static int xpcs_read_link(struct mdio_xpcs_args *xpcs, bool an) +static int xpcs_read_link_c73(struct mdio_xpcs_args *xpcs, bool an) { bool link = true; int ret; @@ -357,7 +422,7 @@ static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed) return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST); } -static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) +static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) { int ret, adv; @@ -401,11 +466,11 @@ static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv); } -static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs) +static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) { int ret; - ret = xpcs_config_aneg_c73(xpcs); + ret = _xpcs_config_aneg_c73(xpcs); if (ret < 0) return ret; @@ -418,8 +483,8 @@ static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs) return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret); } -static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; @@ -434,7 +499,7 @@ static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs, /* Check if Aneg outcome is valid */ if (!(ret & DW_C73_AN_ADV_SF)) { - xpcs_config_aneg(xpcs); + xpcs_config_aneg_c73(xpcs); return 0; } @@ -444,8 +509,8 @@ static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs, return 0; } -static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; @@ -493,8 +558,8 @@ static int 
xpcs_read_lpa(struct mdio_xpcs_args *xpcs, return 0; } -static void xpcs_resolve_lpa(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising); @@ -585,32 +650,84 @@ static int xpcs_validate(struct mdio_xpcs_args *xpcs, return 0; } +static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs) +{ + int ret; + + /* For AN for C37 SGMII mode, the settings are :- + * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) + * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII) + * DW xPCS used with DW EQoS MAC is always MAC side SGMII. + * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic + * speed/duplex mode change by HW after SGMII AN complete) + * + * Note: Since it is MAC side SGMII, there is no need to set + * SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from + * PHY about the link state change after C28 AN is completed + * between PHY and Link Partner. There is also no need to + * trigger AN restart for MAC-side SGMII. + */ + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); + if (ret < 0) + return ret; + + ret &= ~(DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK); + ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << + DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & + DW_VR_MII_PCS_MODE_MASK); + ret |= (DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII << + DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & + DW_VR_MII_TX_CONFIG_MASK); + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); + if (ret < 0) + return ret; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1); + if (ret < 0) + return ret; + + ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; + + return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); +} + static int xpcs_config(struct mdio_xpcs_args *xpcs, const struct phylink_link_state *state) { int ret; - if (state->an_enabled) { - ret = xpcs_config_aneg(xpcs); + switch (xpcs->an_mode) { + case DW_AN_C73: + if (state->an_enabled) { + ret = xpcs_config_aneg_c73(xpcs); + if (ret) + return ret; + } + break; + case DW_AN_C37_SGMII: + ret = xpcs_config_aneg_c37_sgmii(xpcs); if (ret) return ret; + break; + default: + return -1; } return 0; } -static int xpcs_get_state(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; /* Link needs to be read first ... */ - state->link = xpcs_read_link(xpcs, state->an_enabled) > 0 ? 1 : 0; + state->link = xpcs_read_link_c73(xpcs, state->an_enabled) > 0 ? 1 : 0; /* ... and then we check the faults. 
*/ - ret = xpcs_read_fault(xpcs, state); + ret = xpcs_read_fault_c73(xpcs, state); if (ret) { - ret = xpcs_soft_reset(xpcs, MDIO_MMD_PCS); + ret = xpcs_soft_reset(xpcs); if (ret) return ret; @@ -619,10 +736,10 @@ static int xpcs_get_state(struct mdio_xpcs_args *xpcs, return xpcs_config(xpcs, state); } - if (state->an_enabled && xpcs_aneg_done(xpcs, state)) { + if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state)) { state->an_complete = true; - xpcs_read_lpa(xpcs, state); - xpcs_resolve_lpa(xpcs, state); + xpcs_read_lpa_c73(xpcs, state); + xpcs_resolve_lpa_c73(xpcs, state); } else if (state->an_enabled) { state->link = 0; } else if (state->link) { @@ -632,6 +749,70 @@ static int xpcs_get_state(struct mdio_xpcs_args *xpcs, return 0; } +static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) +{ + int ret; + + /* Reset link_state */ + state->link = false; + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + state->pause = 0; + + /* For C37 SGMII mode, we check DW_VR_MII_AN_INTR_STS for link + * status, speed and duplex. + */ + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); + if (ret < 0) + return false; + + if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) { + int speed_value; + + state->link = true; + + speed_value = (ret & DW_VR_MII_AN_STS_C37_ANSGM_SP) >> + DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT; + if (speed_value == DW_VR_MII_C37_ANSGM_SP_1000) + state->speed = SPEED_1000; + else if (speed_value == DW_VR_MII_C37_ANSGM_SP_100) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + + if (ret & DW_VR_MII_AN_STS_C37_ANSGM_FD) + state->duplex = DUPLEX_FULL; + else + state->duplex = DUPLEX_HALF; + } + + return 0; +} + +static int xpcs_get_state(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) +{ + int ret; + + switch (xpcs->an_mode) { + case DW_AN_C73: + ret = xpcs_get_state_c73(xpcs, state); + if (ret) + return ret; + break; + case DW_AN_C37_SGMII: + ret = xpcs_get_state_c37_sgmii(xpcs, state); + if (ret) + return ret; + break; + default: + return -1; + } + + return 0; +} + static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed, phy_interface_t interface) { @@ -646,6 +827,7 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs) int ret; u32 id; + /* First, search C73 PCS using PCS MMD */ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1); if (ret < 0) return 0xffffffff; @@ -656,7 +838,26 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs) if (ret < 0) return 0xffffffff; - return id | ret; + /* If Device IDs are not all zeros, we found C73 AN-type device */ + if (id | ret) + return id | ret; + + /* Next, search C37 PCS using Vendor-Specific MII MMD */ + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1); + if (ret < 0) + return 0xffffffff; + + id = ret << 16; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2); + if (ret < 0) + return 0xffffffff; + + /* If Device IDs are not all zeros, we found C37 AN-type device */ + if (id | ret) + return id | ret; + + return 0xffffffff; } static bool xpcs_check_features(struct mdio_xpcs_args *xpcs, @@ -676,6 +877,8 @@ static bool xpcs_check_features(struct mdio_xpcs_args *xpcs, for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++) set_bit(match->supported[i], xpcs->supported); + xpcs->an_mode = match->an_mode; + return true; } @@ -692,7 +895,7 @@ static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface) match = entry; if (xpcs_check_features(xpcs, match, interface)) - return xpcs_soft_reset(xpcs, MDIO_MMD_PCS); + return 
xpcs_soft_reset(xpcs); } } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 698bea312adc..288bf405ebdb 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -201,6 +201,12 @@ config MARVELL_10G_PHY help Support for the Marvell Alaska MV88X3310 and compatible PHYs. +config MARVELL_88X2222_PHY + tristate "Marvell 88X2222 PHY" + help + Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet + Transceiver. + config MICREL_PHY tristate "Micrel PHYs" help @@ -228,6 +234,12 @@ config NATIONAL_PHY help Currently supports the DP83865 PHY. +config NXP_C45_TJA11XX_PHY + tristate "NXP C45 TJA11XX PHYs" + help + Enable support for NXP C45 TJA11XX PHYs. + Currently supports only the TJA1103 PHY. + config NXP_TJA11XX_PHY tristate "NXP TJA11xx PHYs support" depends on HWMON diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index a13e402074cf..bcda7ed2455d 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o obj-$(CONFIG_MARVELL_PHY) += marvell.o +obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o @@ -70,6 +71,7 @@ obj-$(CONFIG_MICROCHIP_PHY) += microchip.o obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o obj-$(CONFIG_MICROSEMI_PHY) += mscc/ obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_REALTEK_PHY) += realtek.o diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index c2aa4c92edde..32af52dd5aed 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -144,6 +144,9 @@ #define ATH8035_PHY_ID 0x004dd072 #define AT8030_PHY_ID_MASK 0xffffffef +#define AT803X_PAGE_FIBER 0 +#define AT803X_PAGE_COPPER 1 + MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver"); MODULE_AUTHOR("Matus Ujhelyi"); MODULE_LICENSE("GPL"); @@ -198,6 +201,35 @@ static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, return phy_write(phydev, AT803X_DEBUG_DATA, val); } +static int at803x_write_page(struct phy_device *phydev, int page) +{ + int mask; + int set; + + if (page == AT803X_PAGE_COPPER) { + set = AT803X_BT_BX_REG_SEL; + mask = 0; + } else { + set = 0; + mask = AT803X_BT_BX_REG_SEL; + } + + return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set); +} + +static int at803x_read_page(struct phy_device *phydev) +{ + int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG); + + if (ccr < 0) + return ccr; + + if (ccr & AT803X_BT_BX_REG_SEL) + return AT803X_PAGE_COPPER; + + return AT803X_PAGE_FIBER; +} + static int at803x_enable_rx_delay(struct phy_device *phydev) { return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, @@ -522,10 +554,6 @@ static int at803x_parse_dt(struct phy_device *phydev) phydev_err(phydev, "failed to get VDDIO regulator\n"); return PTR_ERR(priv->vddio); } - - ret = regulator_enable(priv->vddio); - if (ret < 0) - return ret; } return 0; @@ -535,6 +563,7 @@ static int at803x_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct at803x_priv *priv; + int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -542,7 +571,35 @@ static int at803x_probe(struct phy_device *phydev) phydev->priv = priv; - return at803x_parse_dt(phydev); + ret = 
at803x_parse_dt(phydev); + if (ret) + return ret; + + if (priv->vddio) { + ret = regulator_enable(priv->vddio); + if (ret < 0) + return ret; + } + + /* Some bootloaders leave the fiber page selected. + * Switch to the copper page, as otherwise we read + * the PHY capabilities from the fiber side. + */ + if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { + phy_lock_mdio_bus(phydev); + ret = at803x_write_page(phydev, AT803X_PAGE_COPPER); + phy_unlock_mdio_bus(phydev); + if (ret) + goto err; + } + + return 0; + +err: + if (priv->vddio) + regulator_disable(priv->vddio); + + return ret; } static void at803x_remove(struct phy_device *phydev) @@ -751,36 +808,6 @@ static void at803x_link_change_notify(struct phy_device *phydev) } } -static int at803x_aneg_done(struct phy_device *phydev) -{ - int ccr; - - int aneg_done = genphy_aneg_done(phydev); - if (aneg_done != BMSR_ANEGCOMPLETE) - return aneg_done; - - /* - * in SGMII mode, if copper side autoneg is successful, - * also check SGMII side autoneg result - */ - ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII) - return aneg_done; - - /* switch to SGMII/fiber page */ - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); - - /* check if the SGMII link is OK. */ - if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) { - phydev_warn(phydev, "803x_aneg_done: SGMII link is not ok\n"); - aneg_done = 0; - } - /* switch back to copper page */ - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); - - return aneg_done; -} - static int at803x_read_status(struct phy_device *phydev) { int ss, err, old_link = phydev->link; @@ -1196,9 +1223,10 @@ static struct phy_driver at803x_driver[] = { .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, + .read_page = at803x_read_page, + .write_page = at803x_write_page, /* PHY_GBIT_FEATURES */ .read_status = at803x_read_status, - .aneg_done = at803x_aneg_done, .config_intr = &at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 82fe5f43f0e9..7bf3011b8e77 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -671,13 +671,13 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } -struct bcm53xx_phy_priv { +struct bcm54xx_phy_priv { u64 *stats; }; -static int bcm53xx_phy_probe(struct phy_device *phydev) +static int bcm54xx_phy_probe(struct phy_device *phydev) { - struct bcm53xx_phy_priv *priv; + struct bcm54xx_phy_priv *priv; priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -694,10 +694,10 @@ static int bcm53xx_phy_probe(struct phy_device *phydev) return 0; } -static void bcm53xx_phy_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data) +static void bcm54xx_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) { - struct bcm53xx_phy_priv *priv = phydev->priv; + struct bcm54xx_phy_priv *priv = phydev->priv; bcm_phy_get_stats(phydev, priv->stats, stats, data); } @@ -708,6 +708,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5411", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, 
.handle_interrupt = bcm_phy_handle_interrupt, @@ -716,6 +720,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -724,6 +732,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54210E", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -732,6 +744,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -740,6 +756,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54612E", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -759,6 +779,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5464", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -769,6 +793,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5481", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, @@ -778,6 +806,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54810", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, @@ -789,6 +821,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54811", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54811_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, @@ -800,6 +836,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5482", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + 
.get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -808,6 +848,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -816,6 +860,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610M", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -824,6 +872,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM57780", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -851,8 +903,8 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .get_sset_count = bcm_phy_get_sset_count, .get_strings = bcm_phy_get_strings, - .get_stats = bcm53xx_phy_get_stats, - .probe = bcm53xx_phy_probe, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, }, { .phy_id = PHY_ID_BCM53125, .phy_id_mask = 0xfffffff0, @@ -861,8 +913,8 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .get_sset_count = bcm_phy_get_sset_count, .get_strings = bcm_phy_get_strings, - .get_stats = bcm53xx_phy_get_stats, - .probe = bcm53xx_phy_probe, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, @@ -871,6 +923,10 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM89610", /* PHY_GBIT_FEATURES */ + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm54xx_get_stats, + .probe = bcm54xx_phy_probe, .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index 6eac50d4b42f..d453ec016168 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -11,6 +11,18 @@ #define XWAY_MDIO_IMASK 0x19 /* interrupt mask */ #define XWAY_MDIO_ISTAT 0x1A /* interrupt status */ +#define XWAY_MDIO_LED 0x1B /* led control */ + +/* bit 15:12 are reserved */ +#define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */ +#define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */ +#define XWAY_MDIO_LED_LED1_EN BIT(9) /* Enable the integrated function of LED1 */ +#define XWAY_MDIO_LED_LED0_EN BIT(8) /* Enable the integrated function of LED0 */ +/* bit 7:4 are reserved */ +#define XWAY_MDIO_LED_LED3_DA BIT(3) /* Direct Access to LED3 */ +#define XWAY_MDIO_LED_LED2_DA BIT(2) /* Direct Access to LED2 */ +#define XWAY_MDIO_LED_LED1_DA BIT(1) /* Direct Access to LED1 
*/ +#define XWAY_MDIO_LED_LED0_DA BIT(0) /* Direct Access to LED0 */ #define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */ #define XWAY_MDIO_INIT_MSRE BIT(14) @@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev) /* Clear all pending interrupts */ phy_read(phydev, XWAY_MDIO_ISTAT); + /* Ensure that integrated led function is enabled for all leds */ + err = phy_write(phydev, XWAY_MDIO_LED, + XWAY_MDIO_LED_LED0_EN | + XWAY_MDIO_LED_LED1_EN | + XWAY_MDIO_LED_LED2_EN | + XWAY_MDIO_LED_LED3_EN); + if (err) + return err; + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH, XWAY_MMD_LEDCH_NACS_NONE | XWAY_MMD_LEDCH_SBF_F02HZ | diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c new file mode 100644 index 000000000000..d8b31d4d2a73 --- /dev/null +++ b/drivers/net/phy/marvell-88x2222.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Marvell 88x2222 dual-port multi-speed ethernet transceiver. + * + * Supports: + * XAUI on the host side. + * 1000Base-X or 10GBase-R on the line side. + * SGMII over 1000Base-X. + */ +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/mdio.h> +#include <linux/marvell_phy.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/sfp.h> +#include <linux/netdevice.h> + +/* Port PCS Configuration */ +#define MV_PCS_CONFIG 0xF002 +#define MV_PCS_HOST_XAUI 0x73 +#define MV_PCS_LINE_10GBR (0x71 << 8) +#define MV_PCS_LINE_1GBX_AN (0x7B << 8) +#define MV_PCS_LINE_SGMII_AN (0x7F << 8) + +/* Port Reset and Power Down */ +#define MV_PORT_RST 0xF003 +#define MV_LINE_RST_SW BIT(15) +#define MV_HOST_RST_SW BIT(7) +#define MV_PORT_RST_SW (MV_LINE_RST_SW | MV_HOST_RST_SW) + +/* PMD Receive Signal Detect */ +#define MV_RX_SIGNAL_DETECT 0x000A +#define MV_RX_SIGNAL_DETECT_GLOBAL BIT(0) + +/* 1000Base-X/SGMII Control Register */ +#define MV_1GBX_CTRL (0x2000 + MII_BMCR) + +/* 1000BASE-X/SGMII Status Register */ +#define MV_1GBX_STAT (0x2000 + MII_BMSR) + +/* 1000Base-X Auto-Negotiation Advertisement Register */ +#define MV_1GBX_ADVERTISE (0x2000 + MII_ADVERTISE) + +/* 1000Base-X PHY Specific Status Register */ +#define MV_1GBX_PHY_STAT 0xA003 +#define MV_1GBX_PHY_STAT_AN_RESOLVED BIT(11) +#define MV_1GBX_PHY_STAT_DUPLEX BIT(13) +#define MV_1GBX_PHY_STAT_SPEED100 BIT(14) +#define MV_1GBX_PHY_STAT_SPEED1000 BIT(15) + +#define AUTONEG_TIMEOUT 3 + +struct mv2222_data { + phy_interface_t line_interface; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + bool sfp_link; +}; + +/* SFI PMA transmit enable */ +static int mv2222_tx_enable(struct phy_device *phydev) +{ + return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, + MDIO_PMD_TXDIS_GLOBAL); +} + +/* SFI PMA transmit disable */ +static int mv2222_tx_disable(struct phy_device *phydev) +{ + return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, + MDIO_PMD_TXDIS_GLOBAL); +} + +static int mv2222_soft_reset(struct phy_device *phydev) +{ + int val, ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PORT_RST, + MV_PORT_RST_SW); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND2, MV_PORT_RST, + val, !(val & MV_PORT_RST_SW), + 5000, 1000000, true); +} + +static int mv2222_disable_aneg(struct phy_device *phydev) +{ + int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL, + BMCR_ANENABLE | BMCR_ANRESTART); + if (ret < 0) + return ret; + + return mv2222_soft_reset(phydev); +} + +static int 
mv2222_enable_aneg(struct phy_device *phydev) +{ + int ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL, + BMCR_ANENABLE | BMCR_RESET); + if (ret < 0) + return ret; + + return mv2222_soft_reset(phydev); +} + +static int mv2222_set_sgmii_speed(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + switch (phydev->speed) { + default: + case SPEED_1000: + if ((linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + priv->supported))) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MV_1GBX_CTRL, + BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED1000); + + fallthrough; + case SPEED_100: + if ((linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + priv->supported))) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MV_1GBX_CTRL, + BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED100); + fallthrough; + case SPEED_10: + if ((linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + priv->supported))) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MV_1GBX_CTRL, + BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED10); + + return -EINVAL; + } +} + +static bool mv2222_is_10g_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + priv->supported)); +} + +static bool mv2222_is_1gbx_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + priv->supported); +} + +static bool mv2222_is_sgmii_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + priv->supported)); +} + +static int mv2222_config_line(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + switch (priv->line_interface) { + case PHY_INTERFACE_MODE_10GBASER: + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG, + MV_PCS_HOST_XAUI | MV_PCS_LINE_10GBR); + case PHY_INTERFACE_MODE_1000BASEX: + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG, + MV_PCS_HOST_XAUI | MV_PCS_LINE_1GBX_AN); + case PHY_INTERFACE_MODE_SGMII: + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG, + MV_PCS_HOST_XAUI | MV_PCS_LINE_SGMII_AN); + default: + return -EINVAL; + } +} + +/* Switch between 1G (1000Base-X/SGMII) and 10G (10GBase-R) modes */ +static int mv2222_swap_line_type(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + bool changed = false; + int ret; + + switch 
(priv->line_interface) { + case PHY_INTERFACE_MODE_10GBASER: + if (mv2222_is_1gbx_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; + changed = true; + } + + if (mv2222_is_sgmii_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_SGMII; + changed = true; + } + + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_SGMII: + if (mv2222_is_10g_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_10GBASER; + changed = true; + } + + break; + default: + return -EINVAL; + } + + if (changed) { + ret = mv2222_config_line(phydev); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv2222_setup_forced(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int ret; + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) { + if (phydev->speed < SPEED_10000 && + phydev->speed != SPEED_UNKNOWN) { + ret = mv2222_swap_line_type(phydev); + if (ret < 0) + return ret; + } + } + + if (priv->line_interface == PHY_INTERFACE_MODE_SGMII) { + ret = mv2222_set_sgmii_speed(phydev); + if (ret < 0) + return ret; + } + + return mv2222_disable_aneg(phydev); +} + +static int mv2222_config_aneg(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int ret, adv; + + /* SFP is not present, do nothing */ + if (priv->line_interface == PHY_INTERFACE_MODE_NA) + return 0; + + if (phydev->autoneg == AUTONEG_DISABLE || + priv->line_interface == PHY_INTERFACE_MODE_10GBASER) + return mv2222_setup_forced(phydev); + + adv = linkmode_adv_to_mii_adv_x(priv->supported, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_ADVERTISE, + ADVERTISE_1000XFULL | + ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM, + adv); + if (ret < 0) + return ret; + + return mv2222_enable_aneg(phydev); +} + +static int mv2222_aneg_done(struct phy_device *phydev) +{ + int ret; + + if (mv2222_is_10g_capable(phydev)) { + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); + if (ret < 0) + return ret; + + if (ret & MDIO_STAT1_LSTATUS) + return 1; + } + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); + if (ret < 0) + return ret; + + return (ret & BMSR_ANEGCOMPLETE); +} + +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_10g(struct phy_device *phydev) +{ + static int timeout; + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); + if (val < 0) + return val; + + if (val & MDIO_STAT1_LSTATUS) { + link = 1; + + /* 10GBASE-R do not support auto-negotiation */ + phydev->autoneg = AUTONEG_DISABLE; + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + } else { + if (phydev->autoneg == AUTONEG_ENABLE) { + timeout++; + + if (timeout > AUTONEG_TIMEOUT) { + timeout = 0; + + val = mv2222_swap_line_type(phydev); + if (val < 0) + return val; + + return mv2222_config_aneg(phydev); + } + } + } + + return link; +} + +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_1g(struct phy_device *phydev) +{ + static int timeout; + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); + if (val < 0) + return val; + + if (phydev->autoneg == AUTONEG_ENABLE && + !(val & BMSR_ANEGCOMPLETE)) { + timeout++; + + if (timeout > AUTONEG_TIMEOUT) { + timeout = 0; + + val = mv2222_swap_line_type(phydev); + if (val < 0) + return val; + + return mv2222_config_aneg(phydev); + } + + return 0; + } + + if (!(val & BMSR_LSTATUS)) + return 0; + + link = 1; + + val = 
phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT); + if (val < 0) + return val; + + if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) { + if (val & MV_1GBX_PHY_STAT_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + if (val & MV_1GBX_PHY_STAT_SPEED1000) + phydev->speed = SPEED_1000; + else if (val & MV_1GBX_PHY_STAT_SPEED100) + phydev->speed = SPEED_100; + else + phydev->speed = SPEED_10; + } + + return link; +} + +static bool mv2222_link_is_operational(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_RX_SIGNAL_DETECT); + if (val < 0 || !(val & MV_RX_SIGNAL_DETECT_GLOBAL)) + return false; + + if (phydev->sfp_bus && !priv->sfp_link) + return false; + + return true; +} + +static int mv2222_read_status(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int link; + + phydev->link = 0; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + + if (!mv2222_link_is_operational(phydev)) + return 0; + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) + link = mv2222_read_status_10g(phydev); + else + link = mv2222_read_status_1g(phydev); + + if (link < 0) + return link; + + phydev->link = link; + + return 0; +} + +static int mv2222_resume(struct phy_device *phydev) +{ + return mv2222_tx_enable(phydev); +} + +static int mv2222_suspend(struct phy_device *phydev) +{ + return mv2222_tx_disable(phydev); +} + +static int mv2222_get_features(struct phy_device *phydev) +{ + /* All supported linkmodes are set at probe */ + + return 0; +} + +static int mv2222_config_init(struct phy_device *phydev) +{ + if (phydev->interface != PHY_INTERFACE_MODE_XAUI) + return -EINVAL; + + return 0; +} + +static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) +{ + struct phy_device *phydev = upstream; + phy_interface_t sfp_interface; + struct mv2222_data *priv; + struct device *dev; + int ret; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, }; + + priv = (struct mv2222_data *)phydev->priv; + dev = &phydev->mdio.dev; + + sfp_parse_support(phydev->sfp_bus, id, sfp_supported); + sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported); + + dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface)); + + if (sfp_interface != PHY_INTERFACE_MODE_10GBASER && + sfp_interface != PHY_INTERFACE_MODE_1000BASEX && + sfp_interface != PHY_INTERFACE_MODE_SGMII) { + dev_err(dev, "Incompatible SFP module inserted\n"); + + return -EINVAL; + } + + priv->line_interface = sfp_interface; + linkmode_and(priv->supported, phydev->supported, sfp_supported); + + ret = mv2222_config_line(phydev); + if (ret < 0) + return ret; + + if (mutex_trylock(&phydev->lock)) { + ret = mv2222_config_aneg(phydev); + mutex_unlock(&phydev->lock); + } + + return ret; +} + +static void mv2222_sfp_remove(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = (struct mv2222_data *)phydev->priv; + + priv->line_interface = PHY_INTERFACE_MODE_NA; + linkmode_zero(priv->supported); +} + +static void mv2222_sfp_link_up(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = phydev->priv; + priv->sfp_link = true; +} + +static void mv2222_sfp_link_down(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = phydev->priv; + priv->sfp_link = false; +} + +static const struct sfp_upstream_ops sfp_phy_ops = { + .module_insert = mv2222_sfp_insert, + 
.module_remove = mv2222_sfp_remove, + .link_up = mv2222_sfp_link_up, + .link_down = mv2222_sfp_link_down, + .attach = phy_sfp_attach, + .detach = phy_sfp_detach, +}; + +static int mv2222_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct mv2222_data *priv = NULL; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported); + + linkmode_copy(phydev->supported, supported); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->line_interface = PHY_INTERFACE_MODE_NA; + phydev->priv = priv; + + return phy_sfp_probe(phydev, &sfp_phy_ops); +} + +static struct phy_driver mv2222_drivers[] = { + { + .phy_id = MARVELL_PHY_ID_88X2222, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88X2222", + .get_features = mv2222_get_features, + .soft_reset = mv2222_soft_reset, + .config_init = mv2222_config_init, + .config_aneg = mv2222_config_aneg, + .aneg_done = mv2222_aneg_done, + .probe = mv2222_probe, + .suspend = mv2222_suspend, + .resume = mv2222_resume, + .read_status = mv2222_read_status, + }, +}; +module_phy_driver(mv2222_drivers); + +static struct mdio_device_id __maybe_unused mv2222_tbl[] = { + { MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK }, + { } +}; +MODULE_DEVICE_TABLE(mdio, mv2222_tbl); + +MODULE_DESCRIPTION("Marvell 88x2222 ethernet transceiver driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 8018ddf7f316..0b2cccb0d865 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -113,15 +113,26 @@ #define MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN BIT(9) #define MII_88E6390_MISC_TEST 0x1b -#define MII_88E6390_MISC_TEST_SAMPLE_1S 0 -#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14) -#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15) -#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0 -#define MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14) +#define MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE_SAMPLE_1S (0x0 << 14) +#define MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE (0x1 << 14) +#define MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE_ONESHOT (0x2 << 14) +#define MII_88E6390_MISC_TEST_TEMP_SENSOR_DISABLE (0x3 << 14) +#define MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK (0x3 << 14) +#define MII_88E6393_MISC_TEST_SAMPLES_2048 (0x0 << 11) +#define MII_88E6393_MISC_TEST_SAMPLES_4096 (0x1 << 
11) +#define MII_88E6393_MISC_TEST_SAMPLES_8192 (0x2 << 11) +#define MII_88E6393_MISC_TEST_SAMPLES_16384 (0x3 << 11) +#define MII_88E6393_MISC_TEST_SAMPLES_MASK (0x3 << 11) +#define MII_88E6393_MISC_TEST_RATE_2_3MS (0x5 << 8) +#define MII_88E6393_MISC_TEST_RATE_6_4MS (0x6 << 8) +#define MII_88E6393_MISC_TEST_RATE_11_9MS (0x7 << 8) +#define MII_88E6393_MISC_TEST_RATE_MASK (0x7 << 8) #define MII_88E6390_TEMP_SENSOR 0x1c -#define MII_88E6390_TEMP_SENSOR_MASK 0xff -#define MII_88E6390_TEMP_SENSOR_SAMPLES 10 +#define MII_88E6393_TEMP_SENSOR_THRESHOLD_MASK 0xff00 +#define MII_88E6393_TEMP_SENSOR_THRESHOLD_SHIFT 8 +#define MII_88E6390_TEMP_SENSOR_MASK 0xff +#define MII_88E6390_TEMP_SENSOR_SAMPLES 10 #define MII_88E1318S_PHY_MSCR1_REG 16 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) @@ -967,22 +978,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data) static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt) { - int val; + int val, err; if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX) return -E2BIG; - if (!cnt) - return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR, - MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN); + if (!cnt) { + err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR, + MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN); + } else { + val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN; + val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1); - val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN; - val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1); + err = phy_modify(phydev, MII_M1111_PHY_EXT_CR, + MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN | + MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, + val); + } - return phy_modify(phydev, MII_M1111_PHY_EXT_CR, - MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN | - MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, - val); + if (err < 0) + return err; + + return genphy_soft_reset(phydev); } static int m88e1111_get_tunable(struct phy_device *phydev, @@ -1025,22 +1042,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data) static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt) { - int val; + int val, err; if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX) return -E2BIG; - if (!cnt) - return phy_clear_bits(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_DOWNSHIFT_EN); + if (!cnt) { + err = phy_clear_bits(phydev, MII_M1011_PHY_SCR, + MII_M1011_PHY_SCR_DOWNSHIFT_EN); + } else { + val = MII_M1011_PHY_SCR_DOWNSHIFT_EN; + val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1); + + err = phy_modify(phydev, MII_M1011_PHY_SCR, + MII_M1011_PHY_SCR_DOWNSHIFT_EN | + MII_M1011_PHY_SCR_DOWNSHIFT_MASK, + val); + } - val = MII_M1011_PHY_SCR_DOWNSHIFT_EN; - val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1); + if (err < 0) + return err; - return phy_modify(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_DOWNSHIFT_EN | - MII_M1011_PHY_SCR_DOWNSHIFT_MASK, - val); + return genphy_soft_reset(phydev); } static int m88e1011_get_tunable(struct phy_device *phydev, @@ -2216,6 +2239,20 @@ static int marvell_vct7_cable_test_get_status(struct phy_device *phydev, } #ifdef CONFIG_HWMON +struct marvell_hwmon_ops { + int (*config)(struct phy_device *phydev); + int (*get_temp)(struct phy_device *phydev, long *temp); + int (*get_temp_critical)(struct phy_device *phydev, long *temp); + int (*set_temp_critical)(struct phy_device *phydev, long temp); + int (*get_temp_alarm)(struct phy_device *phydev, long *alarm); +}; + +static const struct marvell_hwmon_ops * +to_marvell_hwmon_ops(const struct phy_device *phydev) +{ + return phydev->drv->driver_data; +} + static int 
m88e1121_get_temp(struct phy_device *phydev, long *temp) { int oldpage; @@ -2259,75 +2296,6 @@ error: return phy_restore_page(phydev, oldpage, ret); } -static int m88e1121_hwmon_read(struct device *dev, - enum hwmon_sensor_types type, - u32 attr, int channel, long *temp) -{ - struct phy_device *phydev = dev_get_drvdata(dev); - int err; - - switch (attr) { - case hwmon_temp_input: - err = m88e1121_get_temp(phydev, temp); - break; - default: - return -EOPNOTSUPP; - } - - return err; -} - -static umode_t m88e1121_hwmon_is_visible(const void *data, - enum hwmon_sensor_types type, - u32 attr, int channel) -{ - if (type != hwmon_temp) - return 0; - - switch (attr) { - case hwmon_temp_input: - return 0444; - default: - return 0; - } -} - -static u32 m88e1121_hwmon_chip_config[] = { - HWMON_C_REGISTER_TZ, - 0 -}; - -static const struct hwmon_channel_info m88e1121_hwmon_chip = { - .type = hwmon_chip, - .config = m88e1121_hwmon_chip_config, -}; - -static u32 m88e1121_hwmon_temp_config[] = { - HWMON_T_INPUT, - 0 -}; - -static const struct hwmon_channel_info m88e1121_hwmon_temp = { - .type = hwmon_temp, - .config = m88e1121_hwmon_temp_config, -}; - -static const struct hwmon_channel_info *m88e1121_hwmon_info[] = { - &m88e1121_hwmon_chip, - &m88e1121_hwmon_temp, - NULL -}; - -static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = { - .is_visible = m88e1121_hwmon_is_visible, - .read = m88e1121_hwmon_read, -}; - -static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { - .ops = &m88e1121_hwmon_hwmon_ops, - .info = m88e1121_hwmon_info, -}; - static int m88e1510_get_temp(struct phy_device *phydev, long *temp) { int ret; @@ -2390,92 +2358,6 @@ static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm) return 0; } -static int m88e1510_hwmon_read(struct device *dev, - enum hwmon_sensor_types type, - u32 attr, int channel, long *temp) -{ - struct phy_device *phydev = dev_get_drvdata(dev); - int err; - - switch (attr) { - case hwmon_temp_input: - err = m88e1510_get_temp(phydev, temp); - break; - case hwmon_temp_crit: - err = m88e1510_get_temp_critical(phydev, temp); - break; - case hwmon_temp_max_alarm: - err = m88e1510_get_temp_alarm(phydev, temp); - break; - default: - return -EOPNOTSUPP; - } - - return err; -} - -static int m88e1510_hwmon_write(struct device *dev, - enum hwmon_sensor_types type, - u32 attr, int channel, long temp) -{ - struct phy_device *phydev = dev_get_drvdata(dev); - int err; - - switch (attr) { - case hwmon_temp_crit: - err = m88e1510_set_temp_critical(phydev, temp); - break; - default: - return -EOPNOTSUPP; - } - return err; -} - -static umode_t m88e1510_hwmon_is_visible(const void *data, - enum hwmon_sensor_types type, - u32 attr, int channel) -{ - if (type != hwmon_temp) - return 0; - - switch (attr) { - case hwmon_temp_input: - case hwmon_temp_max_alarm: - return 0444; - case hwmon_temp_crit: - return 0644; - default: - return 0; - } -} - -static u32 m88e1510_hwmon_temp_config[] = { - HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, - 0 -}; - -static const struct hwmon_channel_info m88e1510_hwmon_temp = { - .type = hwmon_temp, - .config = m88e1510_hwmon_temp_config, -}; - -static const struct hwmon_channel_info *m88e1510_hwmon_info[] = { - &m88e1121_hwmon_chip, - &m88e1510_hwmon_temp, - NULL -}; - -static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = { - .is_visible = m88e1510_hwmon_is_visible, - .read = m88e1510_hwmon_read, - .write = m88e1510_hwmon_write, -}; - -static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { - .ops = 
&m88e1510_hwmon_hwmon_ops, - .info = m88e1510_hwmon_info, -}; - static int m88e6390_get_temp(struct phy_device *phydev, long *temp) { int sum = 0; @@ -2494,9 +2376,8 @@ static int m88e6390_get_temp(struct phy_device *phydev, long *temp) if (ret < 0) goto error; - ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK; - ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE | - MII_88E6390_MISC_TEST_SAMPLE_1S; + ret &= ~MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK; + ret |= MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE_SAMPLE_1S; ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret); if (ret < 0) @@ -2523,8 +2404,8 @@ static int m88e6390_get_temp(struct phy_device *phydev, long *temp) if (ret < 0) goto error; - ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK; - ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE; + ret = ret & ~MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK; + ret |= MII_88E6390_MISC_TEST_TEMP_SENSOR_DISABLE; ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret); @@ -2534,63 +2415,169 @@ error: return ret; } -static int m88e6390_hwmon_read(struct device *dev, - enum hwmon_sensor_types type, - u32 attr, int channel, long *temp) +static int m88e6393_get_temp(struct phy_device *phydev, long *temp) +{ + int err; + + err = m88e1510_get_temp(phydev, temp); + + /* 88E1510 measures T + 25, while the PHY on 88E6393X switch + * T + 75, so we have to subtract another 50 + */ + *temp -= 50000; + + return err; +} + +static int m88e6393_get_temp_critical(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E6390_TEMP_SENSOR); + if (ret < 0) + return ret; + + *temp = (((ret & MII_88E6393_TEMP_SENSOR_THRESHOLD_MASK) >> + MII_88E6393_TEMP_SENSOR_THRESHOLD_SHIFT) - 75) * 1000; + + return 0; +} + +static int m88e6393_set_temp_critical(struct phy_device *phydev, long temp) +{ + temp = (temp / 1000) + 75; + + return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E6390_TEMP_SENSOR, + MII_88E6393_TEMP_SENSOR_THRESHOLD_MASK, + temp << MII_88E6393_TEMP_SENSOR_THRESHOLD_SHIFT); +} + +static int m88e6393_hwmon_config(struct phy_device *phydev) { - struct phy_device *phydev = dev_get_drvdata(dev); int err; + err = m88e6393_set_temp_critical(phydev, 100000); + if (err) + return err; + + return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E6390_MISC_TEST, + MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK | + MII_88E6393_MISC_TEST_SAMPLES_MASK | + MII_88E6393_MISC_TEST_RATE_MASK, + MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE | + MII_88E6393_MISC_TEST_SAMPLES_2048 | + MII_88E6393_MISC_TEST_RATE_2_3MS); +} + +static int marvell_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); + int err = -EOPNOTSUPP; + switch (attr) { case hwmon_temp_input: - err = m88e6390_get_temp(phydev, temp); + if (ops->get_temp) + err = ops->get_temp(phydev, temp); + break; + case hwmon_temp_crit: + if (ops->get_temp_critical) + err = ops->get_temp_critical(phydev, temp); + break; + case hwmon_temp_max_alarm: + if (ops->get_temp_alarm) + err = ops->get_temp_alarm(phydev, temp); + break; + } + + return err; +} + +static int marvell_hwmon_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); + int err = -EOPNOTSUPP; + + switch (attr) { + case 
hwmon_temp_crit: + if (ops->set_temp_critical) + err = ops->set_temp_critical(phydev, temp); break; - default: - return -EOPNOTSUPP; } return err; } -static umode_t m88e6390_hwmon_is_visible(const void *data, - enum hwmon_sensor_types type, - u32 attr, int channel) +static umode_t marvell_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) { + const struct phy_device *phydev = data; + const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); + if (type != hwmon_temp) return 0; switch (attr) { case hwmon_temp_input: - return 0444; + return ops->get_temp ? 0444 : 0; + case hwmon_temp_max_alarm: + return ops->get_temp_alarm ? 0444 : 0; + case hwmon_temp_crit: + return (ops->get_temp_critical ? 0444 : 0) | + (ops->set_temp_critical ? 0200 : 0); default: return 0; } } -static u32 m88e6390_hwmon_temp_config[] = { - HWMON_T_INPUT, +static u32 marvell_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info marvell_hwmon_chip = { + .type = hwmon_chip, + .config = marvell_hwmon_chip_config, +}; + +/* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not + * defined for all PHYs, because the hwmon code checks whether the attributes + * exists via the .is_visible method + */ +static u32 marvell_hwmon_temp_config[] = { + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, 0 }; -static const struct hwmon_channel_info m88e6390_hwmon_temp = { +static const struct hwmon_channel_info marvell_hwmon_temp = { .type = hwmon_temp, - .config = m88e6390_hwmon_temp_config, + .config = marvell_hwmon_temp_config, }; -static const struct hwmon_channel_info *m88e6390_hwmon_info[] = { - &m88e1121_hwmon_chip, - &m88e6390_hwmon_temp, +static const struct hwmon_channel_info *marvell_hwmon_info[] = { + &marvell_hwmon_chip, + &marvell_hwmon_temp, NULL }; -static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = { - .is_visible = m88e6390_hwmon_is_visible, - .read = m88e6390_hwmon_read, +static const struct hwmon_ops marvell_hwmon_hwmon_ops = { + .is_visible = marvell_hwmon_is_visible, + .read = marvell_hwmon_read, + .write = marvell_hwmon_write, }; -static const struct hwmon_chip_info m88e6390_hwmon_chip_info = { - .ops = &m88e6390_hwmon_hwmon_ops, - .info = m88e6390_hwmon_info, +static const struct hwmon_chip_info marvell_hwmon_chip_info = { + .ops = &marvell_hwmon_hwmon_ops, + .info = marvell_hwmon_info, }; static int marvell_hwmon_name(struct phy_device *phydev) @@ -2613,49 +2600,61 @@ static int marvell_hwmon_name(struct phy_device *phydev) return 0; } -static int marvell_hwmon_probe(struct phy_device *phydev, - const struct hwmon_chip_info *chip) +static int marvell_hwmon_probe(struct phy_device *phydev) { + const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); struct marvell_priv *priv = phydev->priv; struct device *dev = &phydev->mdio.dev; int err; + if (!ops) + return 0; + err = marvell_hwmon_name(phydev); if (err) return err; priv->hwmon_dev = devm_hwmon_device_register_with_info( - dev, priv->hwmon_name, phydev, chip, NULL); + dev, priv->hwmon_name, phydev, &marvell_hwmon_chip_info, NULL); + if (IS_ERR(priv->hwmon_dev)) + return PTR_ERR(priv->hwmon_dev); - return PTR_ERR_OR_ZERO(priv->hwmon_dev); -} + if (ops->config) + err = ops->config(phydev); -static int m88e1121_hwmon_probe(struct phy_device *phydev) -{ - return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info); + return err; } -static int m88e1510_hwmon_probe(struct phy_device *phydev) -{ - return marvell_hwmon_probe(phydev, 
&m88e1510_hwmon_chip_info); -} +static const struct marvell_hwmon_ops m88e1121_hwmon_ops = { + .get_temp = m88e1121_get_temp, +}; + +static const struct marvell_hwmon_ops m88e1510_hwmon_ops = { + .get_temp = m88e1510_get_temp, + .get_temp_critical = m88e1510_get_temp_critical, + .set_temp_critical = m88e1510_set_temp_critical, + .get_temp_alarm = m88e1510_get_temp_alarm, +}; + +static const struct marvell_hwmon_ops m88e6390_hwmon_ops = { + .get_temp = m88e6390_get_temp, +}; + +static const struct marvell_hwmon_ops m88e6393_hwmon_ops = { + .config = m88e6393_hwmon_config, + .get_temp = m88e6393_get_temp, + .get_temp_critical = m88e6393_get_temp_critical, + .set_temp_critical = m88e6393_set_temp_critical, + .get_temp_alarm = m88e1510_get_temp_alarm, +}; + +#define DEF_MARVELL_HWMON_OPS(s) (&(s)) -static int m88e6390_hwmon_probe(struct phy_device *phydev) -{ - return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info); -} #else -static int m88e1121_hwmon_probe(struct phy_device *phydev) -{ - return 0; -} -static int m88e1510_hwmon_probe(struct phy_device *phydev) -{ - return 0; -} +#define DEF_MARVELL_HWMON_OPS(s) NULL -static int m88e6390_hwmon_probe(struct phy_device *phydev) +static int marvell_hwmon_probe(struct phy_device *phydev) { return 0; } @@ -2671,40 +2670,7 @@ static int marvell_probe(struct phy_device *phydev) phydev->priv = priv; - return 0; -} - -static int m88e1121_probe(struct phy_device *phydev) -{ - int err; - - err = marvell_probe(phydev); - if (err) - return err; - - return m88e1121_hwmon_probe(phydev); -} - -static int m88e1510_probe(struct phy_device *phydev) -{ - int err; - - err = marvell_probe(phydev); - if (err) - return err; - - return m88e1510_hwmon_probe(phydev); -} - -static int m88e6390_probe(struct phy_device *phydev) -{ - int err; - - err = marvell_probe(phydev); - if (err) - return err; - - return m88e6390_hwmon_probe(phydev); + return marvell_hwmon_probe(phydev); } static struct phy_driver marvell_drivers[] = { @@ -2810,8 +2776,9 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1121R, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1121_hwmon_ops), /* PHY_GBIT_FEATURES */ - .probe = m88e1121_probe, + .probe = marvell_probe, .config_init = marvell_config_init, .config_aneg = m88e1121_config_aneg, .read_status = marvell_read_status, @@ -2903,6 +2870,8 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, + .get_tunable = m88e1011_get_tunable, + .set_tunable = m88e1011_set_tunable, }, { .phy_id = MARVELL_PHY_ID_88E1116R, @@ -2927,9 +2896,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1510, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), .features = PHY_GBIT_FIBRE_FEATURES, .flags = PHY_POLL_CABLE_TEST, - .probe = m88e1510_probe, + .probe = marvell_probe, .config_init = m88e1510_config_init, .config_aneg = m88e1510_config_aneg, .read_status = marvell_read_status, @@ -2955,9 +2925,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1540, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1540", + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, - .probe = m88e1510_probe, + .probe = marvell_probe, .config_init = marvell_config_init, .config_aneg = 
m88e1510_config_aneg, .read_status = marvell_read_status, @@ -2980,7 +2951,8 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1545, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1545", - .probe = m88e1510_probe, + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + .probe = marvell_probe, /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .config_init = marvell_config_init, @@ -3024,9 +2996,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E6341_FAMILY, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E6341 Family", + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, - .probe = m88e1510_probe, + .probe = marvell_probe, .config_init = marvell_config_init, .config_aneg = m88e6390_config_aneg, .read_status = marvell_read_status, @@ -3049,9 +3022,10 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E6390_FAMILY, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E6390 Family", + .driver_data = DEF_MARVELL_HWMON_OPS(m88e6390_hwmon_ops), /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, - .probe = m88e6390_probe, + .probe = marvell_probe, .config_init = marvell_config_init, .config_aneg = m88e6390_config_aneg, .read_status = marvell_read_status, @@ -3071,10 +3045,37 @@ static struct phy_driver marvell_drivers[] = { .cable_test_get_status = marvell_vct7_cable_test_get_status, }, { + .phy_id = MARVELL_PHY_ID_88E6393_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6393 Family", + .driver_data = DEF_MARVELL_HWMON_OPS(m88e6393_hwmon_ops), + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, + .probe = marvell_probe, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, + .cable_test_get_status = marvell_vct7_cable_test_get_status, + }, + { .phy_id = MARVELL_PHY_ID_88E1340S, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1340S", - .probe = m88e1510_probe, + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + .probe = marvell_probe, /* PHY_GBIT_FEATURES */ .config_init = marvell_config_init, .config_aneg = m88e1510_config_aneg, @@ -3095,7 +3096,8 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1548P, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1548P", - .probe = m88e1510_probe, + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + .probe = marvell_probe, .features = PHY_GBIT_FIBRE_FEATURES, .config_init = marvell_config_init, .config_aneg = m88e1510_config_aneg, @@ -3134,6 +3136,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6393_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK }, { } 
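The marvell.c hunk above replaces the per-model hwmon probe helpers and hwmon_chip_info tables with a single generic path that dispatches through a per-chip `struct marvell_hwmon_ops` hung off `.driver_data`, hiding attributes whose callbacks are absent via `.is_visible`. The sketch below is only an illustration of that optional-callback dispatch pattern in plain, self-contained userspace C; it is not part of the patch, and every name in it (hwmon_like_ops, chip_a_ops, generic_read_temp, ...) is made up for the example.

/*
 * Minimal sketch, assuming nothing beyond standard C: one generic
 * read/visibility implementation consults a per-chip ops table and
 * reports "not supported" (or hides the attribute) when a callback
 * is NULL, mirroring marvell_hwmon_read()/marvell_hwmon_is_visible().
 */
#include <stdio.h>

#define EOPNOTSUPP 95	/* stand-in error code for the example */

struct hwmon_like_ops {
	/* every member is optional; NULL means "not supported" */
	int (*get_temp)(long *temp);
	int (*get_temp_critical)(long *temp);
	int (*set_temp_critical)(long temp);
};

static int chip_a_get_temp(long *temp)      { *temp = 42000;  return 0; }
static int chip_a_get_temp_crit(long *temp) { *temp = 100000; return 0; }

/* chip A supports input + critical threshold, chip B only input */
static const struct hwmon_like_ops chip_a_ops = {
	.get_temp	   = chip_a_get_temp,
	.get_temp_critical = chip_a_get_temp_crit,
};
static const struct hwmon_like_ops chip_b_ops = {
	.get_temp = chip_a_get_temp,
};

/* generic "read", shared by all chips */
static int generic_read_temp(const struct hwmon_like_ops *ops, int want_crit,
			     long *temp)
{
	if (want_crit)
		return ops->get_temp_critical ? ops->get_temp_critical(temp)
					      : -EOPNOTSUPP;
	return ops->get_temp ? ops->get_temp(temp) : -EOPNOTSUPP;
}

/* generic visibility check: mode bits only for callbacks that exist */
static int crit_attr_mode(const struct hwmon_like_ops *ops)
{
	return (ops->get_temp_critical ? 0444 : 0) |
	       (ops->set_temp_critical ? 0200 : 0);
}

int main(void)
{
	long t;

	if (!generic_read_temp(&chip_a_ops, 1, &t))
		printf("chip A crit: %ld millidegrees, mode %o\n",
		       t, crit_attr_mode(&chip_a_ops));

	if (generic_read_temp(&chip_b_ops, 1, &t) == -EOPNOTSUPP)
		printf("chip B: crit attribute hidden, mode %o\n",
		       crit_attr_mode(&chip_b_ops));

	return 0;
}

This is also why the patch can declare HWMON_T_CRIT and HWMON_T_MAX_ALARM in the shared channel config for every Marvell PHY: the visibility callback suppresses the attributes on chips whose ops table leaves those hooks NULL.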
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index b1bb9b8e1e4e..bbbc6ac8fa82 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -35,6 +35,15 @@ enum { MV_PMA_FW_VER0 = 0xc011, MV_PMA_FW_VER1 = 0xc012, + MV_PMA_21X0_PORT_CTRL = 0xc04a, + MV_PMA_21X0_PORT_CTRL_SWRST = BIT(15), + MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK = 0x7, + MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII = 0x0, + MV_PMA_2180_PORT_CTRL_MACTYPE_DXGMII = 0x1, + MV_PMA_2180_PORT_CTRL_MACTYPE_QXGMII = 0x2, + MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER = 0x4, + MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER_NO_SGMII_AN = 0x5, + MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6, MV_PMA_BOOT = 0xc050, MV_PMA_BOOT_FATAL = BIT(0), @@ -78,10 +87,18 @@ enum { /* Vendor2 MMD registers */ MV_V2_PORT_CTRL = 0xf001, - MV_V2_PORT_CTRL_SWRST = BIT(15), - MV_V2_PORT_CTRL_PWRDOWN = BIT(11), - MV_V2_PORT_MAC_TYPE_MASK = 0x7, - MV_V2_PORT_MAC_TYPE_RATE_MATCH = 0x6, + MV_V2_PORT_CTRL_PWRDOWN = BIT(11), + MV_V2_33X0_PORT_CTRL_SWRST = BIT(15), + MV_V2_33X0_PORT_CTRL_MACTYPE_MASK = 0x7, + MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI = 0x0, + MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH = 0x1, + MV_V2_3340_PORT_CTRL_MACTYPE_RXAUI_NO_SGMII_AN = 0x1, + MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH = 0x2, + MV_V2_3310_PORT_CTRL_MACTYPE_XAUI = 0x3, + MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER = 0x4, + MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5, + MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6, + MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7, /* Temperature control/read registers (88X3310 only) */ MV_V2_TEMP_CTRL = 0xf08a, MV_V2_TEMP_CTRL_MASK = 0xc000, @@ -91,14 +108,32 @@ enum { MV_V2_TEMP_UNKNOWN = 0x9600, /* unknown function */ }; +struct mv3310_chip { + void (*init_supported_interfaces)(unsigned long *mask); + int (*get_mactype)(struct phy_device *phydev); + int (*init_interface)(struct phy_device *phydev, int mactype); + +#ifdef CONFIG_HWMON + int (*hwmon_read_temp_reg)(struct phy_device *phydev); +#endif +}; + struct mv3310_priv { + DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX); + u32 firmware_ver; bool rate_match; + phy_interface_t const_interface; struct device *hwmon_dev; char *hwmon_name; }; +static const struct mv3310_chip *to_mv3310_chip(struct phy_device *phydev) +{ + return phydev->drv->driver_data; +} + #ifdef CONFIG_HWMON static umode_t mv3310_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, @@ -121,18 +156,11 @@ static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev) return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP); } -static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev) -{ - if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310) - return mv3310_hwmon_read_temp_reg(phydev); - else /* MARVELL_PHY_ID_88E2110 */ - return mv2110_hwmon_read_temp_reg(phydev); -} - static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *value) { struct phy_device *phydev = dev_get_drvdata(dev); + const struct mv3310_chip *chip = to_mv3310_chip(phydev); int temp; if (type == hwmon_chip && attr == hwmon_chip_update_interval) { @@ -141,7 +169,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, } if (type == hwmon_temp && attr == hwmon_temp_input) { - temp = mv10g_hwmon_read_temp_reg(phydev); + temp = chip->hwmon_read_temp_reg(phydev); if (temp < 0) return temp; @@ -268,7 +296,7 @@ static int mv3310_power_up(struct phy_device *phydev) return ret; return phy_set_bits_mmd(phydev, 
MDIO_MMD_VEND2, MV_V2_PORT_CTRL, - MV_V2_PORT_CTRL_SWRST); + MV_V2_33X0_PORT_CTRL_SWRST); } static int mv3310_reset(struct phy_device *phydev, u32 unit) @@ -363,6 +391,7 @@ static const struct sfp_upstream_ops mv3310_sfp_ops = { static int mv3310_probe(struct phy_device *phydev) { + const struct mv3310_chip *chip = to_mv3310_chip(phydev); struct mv3310_priv *priv; u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN; int ret; @@ -412,6 +441,8 @@ static int mv3310_probe(struct phy_device *phydev) if (ret) return ret; + chip->init_supported_interfaces(priv->supported_interfaces); + return phy_sfp_probe(phydev, &mv3310_sfp_ops); } @@ -453,18 +484,102 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev) MV_PHY_ALASKA_NBT_QUIRK_MASK) == MV_PHY_ALASKA_NBT_QUIRK_REV; } +static int mv2110_get_mactype(struct phy_device *phydev) +{ + int mactype; + + mactype = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_21X0_PORT_CTRL); + if (mactype < 0) + return mactype; + + return mactype & MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK; +} + +static int mv3310_get_mactype(struct phy_device *phydev) +{ + int mactype; + + mactype = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL); + if (mactype < 0) + return mactype; + + return mactype & MV_V2_33X0_PORT_CTRL_MACTYPE_MASK; +} + +static int mv2110_init_interface(struct phy_device *phydev, int mactype) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + + priv->rate_match = false; + + if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH) + priv->rate_match = true; + + if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII) + priv->const_interface = PHY_INTERFACE_MODE_USXGMII; + else if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH) + priv->const_interface = PHY_INTERFACE_MODE_10GBASER; + else if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER || + mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER_NO_SGMII_AN) + priv->const_interface = PHY_INTERFACE_MODE_NA; + else + return -EINVAL; + + return 0; +} + +static int mv3310_init_interface(struct phy_device *phydev, int mactype) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + + priv->rate_match = false; + + if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH || + mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH || + mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH) + priv->rate_match = true; + + if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII) + priv->const_interface = PHY_INTERFACE_MODE_USXGMII; + else if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH || + mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN || + mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER) + priv->const_interface = PHY_INTERFACE_MODE_10GBASER; + else if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH || + mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI) + priv->const_interface = PHY_INTERFACE_MODE_RXAUI; + else if (mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH || + mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI) + priv->const_interface = PHY_INTERFACE_MODE_XAUI; + else + return -EINVAL; + + return 0; +} + +static int mv3340_init_interface(struct phy_device *phydev, int mactype) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + int err = 0; + + priv->rate_match = false; + + if (mactype == MV_V2_3340_PORT_CTRL_MACTYPE_RXAUI_NO_SGMII_AN) + priv->const_interface = PHY_INTERFACE_MODE_RXAUI; + else + err = mv3310_init_interface(phydev, mactype); + + return err; +} + static int 
mv3310_config_init(struct phy_device *phydev) { struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); - int err; - int val; + const struct mv3310_chip *chip = to_mv3310_chip(phydev); + int err, mactype; /* Check that the PHY interface type is compatible */ - if (phydev->interface != PHY_INTERFACE_MODE_SGMII && - phydev->interface != PHY_INTERFACE_MODE_2500BASEX && - phydev->interface != PHY_INTERFACE_MODE_XAUI && - phydev->interface != PHY_INTERFACE_MODE_RXAUI && - phydev->interface != PHY_INTERFACE_MODE_10GBASER) + if (!test_bit(phydev->interface, priv->supported_interfaces)) return -ENODEV; phydev->mdix_ctrl = ETH_TP_MDI_AUTO; @@ -474,11 +589,15 @@ static int mv3310_config_init(struct phy_device *phydev) if (err) return err; - val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL); - if (val < 0) - return val; - priv->rate_match = ((val & MV_V2_PORT_MAC_TYPE_MASK) == - MV_V2_PORT_MAC_TYPE_RATE_MATCH); + mactype = chip->get_mactype(phydev); + if (mactype < 0) + return mactype; + + err = chip->init_interface(phydev, mactype); + if (err) { + phydev_err(phydev, "MACTYPE configuration invalid\n"); + return err; + } /* Enable EDPD mode - saving 600mW */ return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); @@ -588,40 +707,44 @@ static void mv3310_update_interface(struct phy_device *phydev) { struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); - /* In "XFI with Rate Matching" mode the PHY interface is fixed at - * 10Gb. The PHY adapts the rate to actual wire speed with help of + if (!phydev->link) + return; + + /* In all of the "* with Rate Matching" modes the PHY interface is fixed + * at 10Gb. The PHY adapts the rate to actual wire speed with help of * internal 16KB buffer. + * + * In USXGMII mode the PHY interface mode is also fixed. */ - if (priv->rate_match) { - phydev->interface = PHY_INTERFACE_MODE_10GBASER; + if (priv->rate_match || + priv->const_interface == PHY_INTERFACE_MODE_USXGMII) { + phydev->interface = priv->const_interface; return; } - if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || - phydev->interface == PHY_INTERFACE_MODE_2500BASEX || - phydev->interface == PHY_INTERFACE_MODE_10GBASER) && - phydev->link) { - /* The PHY automatically switches its serdes interface (and - * active PHYXS instance) between Cisco SGMII, 10GBase-R and - * 2500BaseX modes according to the speed. Florian suggests - * setting phydev->interface to communicate this to the MAC. - * Only do this if we are already in one of the above modes. - */ - switch (phydev->speed) { - case SPEED_10000: - phydev->interface = PHY_INTERFACE_MODE_10GBASER; - break; - case SPEED_2500: - phydev->interface = PHY_INTERFACE_MODE_2500BASEX; - break; - case SPEED_1000: - case SPEED_100: - case SPEED_10: - phydev->interface = PHY_INTERFACE_MODE_SGMII; - break; - default: - break; - } + /* The PHY automatically switches its serdes interface (and active PHYXS + * instance) between Cisco SGMII, 2500BaseX, 5GBase-R and 10GBase-R / + * xaui / rxaui modes according to the speed. + * Florian suggests setting phydev->interface to communicate this to the + * MAC. Only do this if we are already in one of the above modes. 
+ */ + switch (phydev->speed) { + case SPEED_10000: + phydev->interface = priv->const_interface; + break; + case SPEED_5000: + phydev->interface = PHY_INTERFACE_MODE_5GBASER; + break; + case SPEED_2500: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case SPEED_1000: + case SPEED_100: + case SPEED_10: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + default: + break; } } @@ -765,11 +888,133 @@ static int mv3310_set_tunable(struct phy_device *phydev, } } +static void mv3310_init_supported_interfaces(unsigned long *mask) +{ + __set_bit(PHY_INTERFACE_MODE_SGMII, mask); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask); + __set_bit(PHY_INTERFACE_MODE_5GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_XAUI, mask); + __set_bit(PHY_INTERFACE_MODE_RXAUI, mask); + __set_bit(PHY_INTERFACE_MODE_10GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_USXGMII, mask); +} + +static void mv3340_init_supported_interfaces(unsigned long *mask) +{ + __set_bit(PHY_INTERFACE_MODE_SGMII, mask); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask); + __set_bit(PHY_INTERFACE_MODE_5GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_RXAUI, mask); + __set_bit(PHY_INTERFACE_MODE_10GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_USXGMII, mask); +} + +static void mv2110_init_supported_interfaces(unsigned long *mask) +{ + __set_bit(PHY_INTERFACE_MODE_SGMII, mask); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask); + __set_bit(PHY_INTERFACE_MODE_5GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_10GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_USXGMII, mask); +} + +static void mv2111_init_supported_interfaces(unsigned long *mask) +{ + __set_bit(PHY_INTERFACE_MODE_SGMII, mask); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask); + __set_bit(PHY_INTERFACE_MODE_10GBASER, mask); + __set_bit(PHY_INTERFACE_MODE_USXGMII, mask); +} + +static const struct mv3310_chip mv3310_type = { + .init_supported_interfaces = mv3310_init_supported_interfaces, + .get_mactype = mv3310_get_mactype, + .init_interface = mv3310_init_interface, + +#ifdef CONFIG_HWMON + .hwmon_read_temp_reg = mv3310_hwmon_read_temp_reg, +#endif +}; + +static const struct mv3310_chip mv3340_type = { + .init_supported_interfaces = mv3340_init_supported_interfaces, + .get_mactype = mv3310_get_mactype, + .init_interface = mv3340_init_interface, + +#ifdef CONFIG_HWMON + .hwmon_read_temp_reg = mv3310_hwmon_read_temp_reg, +#endif +}; + +static const struct mv3310_chip mv2110_type = { + .init_supported_interfaces = mv2110_init_supported_interfaces, + .get_mactype = mv2110_get_mactype, + .init_interface = mv2110_init_interface, + +#ifdef CONFIG_HWMON + .hwmon_read_temp_reg = mv2110_hwmon_read_temp_reg, +#endif +}; + +static const struct mv3310_chip mv2111_type = { + .init_supported_interfaces = mv2111_init_supported_interfaces, + .get_mactype = mv2110_get_mactype, + .init_interface = mv2110_init_interface, + +#ifdef CONFIG_HWMON + .hwmon_read_temp_reg = mv2110_hwmon_read_temp_reg, +#endif +}; + +static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g) +{ + int val; + + if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] & + MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88E2110) + return 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_SPEED); + if (val < 0) + return val; + + return !!(val & MDIO_PCS_SPEED_5G) == has_5g; +} + +static int mv2110_match_phy_device(struct phy_device *phydev) +{ + return mv211x_match_phy_device(phydev, true); +} + +static int mv2111_match_phy_device(struct phy_device *phydev) +{ + return mv211x_match_phy_device(phydev, false); +} + static struct 
phy_driver mv3310_drivers[] = { { .phy_id = MARVELL_PHY_ID_88X3310, - .phy_id_mask = MARVELL_PHY_ID_MASK, + .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, .name = "mv88x3310", + .driver_data = &mv3310_type, + .get_features = mv3310_get_features, + .config_init = mv3310_config_init, + .probe = mv3310_probe, + .suspend = mv3310_suspend, + .resume = mv3310_resume, + .config_aneg = mv3310_config_aneg, + .aneg_done = mv3310_aneg_done, + .read_status = mv3310_read_status, + .get_tunable = mv3310_get_tunable, + .set_tunable = mv3310_set_tunable, + .remove = mv3310_remove, + .set_loopback = genphy_c45_loopback, + }, + { + .phy_id = MARVELL_PHY_ID_88X3340, + .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .name = "mv88x3340", + .driver_data = &mv3340_type, .get_features = mv3310_get_features, .config_init = mv3310_config_init, .probe = mv3310_probe, @@ -781,11 +1026,32 @@ static struct phy_driver mv3310_drivers[] = { .get_tunable = mv3310_get_tunable, .set_tunable = mv3310_set_tunable, .remove = mv3310_remove, + .set_loopback = genphy_c45_loopback, + }, + { + .phy_id = MARVELL_PHY_ID_88E2110, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv2110_match_phy_device, + .name = "mv88e2110", + .driver_data = &mv2110_type, + .probe = mv3310_probe, + .suspend = mv3310_suspend, + .resume = mv3310_resume, + .config_init = mv3310_config_init, + .config_aneg = mv3310_config_aneg, + .aneg_done = mv3310_aneg_done, + .read_status = mv3310_read_status, + .get_tunable = mv3310_get_tunable, + .set_tunable = mv3310_set_tunable, + .remove = mv3310_remove, + .set_loopback = genphy_c45_loopback, }, { .phy_id = MARVELL_PHY_ID_88E2110, .phy_id_mask = MARVELL_PHY_ID_MASK, - .name = "mv88x2110", + .match_phy_device = mv2111_match_phy_device, + .name = "mv88e2111", + .driver_data = &mv2111_type, .probe = mv3310_probe, .suspend = mv3310_suspend, .resume = mv3310_resume, @@ -796,16 +1062,18 @@ static struct phy_driver mv3310_drivers[] = { .get_tunable = mv3310_get_tunable, .set_tunable = mv3310_set_tunable, .remove = mv3310_remove, + .set_loopback = genphy_c45_loopback, }, }; module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK }, + { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK }, { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK }, { }, }; MODULE_DEVICE_TABLE(mdio, mv3310_tbl); -MODULE_DESCRIPTION("Marvell Alaska X 10Gigabit Ethernet PHY driver (MV88X3310)"); +MODULE_DESCRIPTION("Marvell Alaska X/M multi-gigabit Ethernet PHY driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 033df435f76c..2de679a68115 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -50,7 +50,7 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info); /** - * mdio_register_board_info - register MDIO devices for a given board + * mdiobus_register_board_info - register MDIO devices for a given board * @info: array of devices descriptors * @n: number of descriptors provided * Context: can sleep diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 823518554079..dadf75ff3ab9 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -510,7 +510,7 @@ static int mdiobus_create_device(struct mii_bus *bus, * on a given bus, and attach them to the bus. 
Drivers should use * mdiobus_register() rather than __mdiobus_register() unless they * need to pass a specific owner module. MDIO devices which are not - * PHYs will not be brought up by this function. They are expected to + * PHYs will not be brought up by this function. They are expected * to be explicitly listed in DT and instantiated by of_mdiobus_register(). * * Returns 0 on success or < 0 on error. diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 3a7705228ed5..6e32da28e138 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1362,6 +1362,12 @@ static int vsc8584_config_pre_init(struct phy_device *phydev) u16 crc, reg; int ret; + ret = vsc8584_pll5g_reset(phydev); + if (ret < 0) { + dev_err(dev, "failed LCPLL reset, ret: %d\n", ret); + return ret; + } + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); /* all writes below are broadcasted to all PHYs in the same package */ @@ -1466,6 +1472,24 @@ static int vsc8584_config_pre_init(struct phy_device *phydev) if (ret) goto out; + /* Write patch vector 0, to skip IB cal polling */ + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO); + reg = MSCC_ROM_TRAP_SERDES_6G_CFG; /* ROM address to trap, for patch vector 0 */ + ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg); + if (ret) + goto out; + + reg = MSCC_RAM_TRAP_SERDES_6G_CFG; /* RAM address to jump to, when patch vector 0 enabled */ + ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg); + if (ret) + goto out; + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */ + ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + if (ret) + goto out; + vsc8584_micro_deassert_reset(phydev, true); out: @@ -1531,62 +1555,81 @@ static void vsc85xx_coma_mode_release(struct phy_device *phydev) vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD); } -static int vsc8584_config_init(struct phy_device *phydev) +static int vsc8584_config_host_serdes(struct phy_device *phydev) { struct vsc8531_private *vsc8531 = phydev->priv; - int ret, i; + int ret; u16 val; - phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + if (ret) + return ret; - phy_lock_mdio_bus(phydev); + val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); + val &= ~MAC_CFG_MASK; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) { + val |= MAC_CFG_QSGMII; + } else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + val |= MAC_CFG_SGMII; + } else { + ret = -EINVAL; + return ret; + } - /* Some parts of the init sequence are identical for every PHY in the - * package. Some parts are modifying the GPIO register bank which is a - * set of registers that are affecting all PHYs, a few resetting the - * microprocessor common to all PHYs. The CRC check responsible of the - * checking the firmware within the 8051 microprocessor can only be - * accessed via the PHY whose internal address in the package is 0. - * All PHYs' interrupts mask register has to be zeroed before enabling - * any PHY's interrupt in this register. - * For all these reasons, we need to do the init sequence once and only - * once whatever is the first PHY in the package that is initialized and - * do the correct init sequence for all PHYs that are package-critical - * in this pre-init function. 
- */ - if (phy_package_init_once(phydev)) { - /* The following switch statement assumes that the lowest - * nibble of the phy_id_mask is always 0. This works because - * the lowest nibble of the PHY_ID's below are also 0. - */ - WARN_ON(phydev->drv->phy_id_mask & 0xf); + ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); + if (ret) + return ret; - switch (phydev->phy_id & phydev->drv->phy_id_mask) { - case PHY_ID_VSC8504: - case PHY_ID_VSC8552: - case PHY_ID_VSC8572: - case PHY_ID_VSC8574: - ret = vsc8574_config_pre_init(phydev); - break; - case PHY_ID_VSC856X: - case PHY_ID_VSC8575: - case PHY_ID_VSC8582: - case PHY_ID_VSC8584: - ret = vsc8584_config_pre_init(phydev); - break; - default: - ret = -EINVAL; - break; - } + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); + if (ret) + return ret; - if (ret) - goto err; - } + val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | + PROC_CMD_READ_MOD_WRITE_PORT; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) + val |= PROC_CMD_QSGMII_MAC; + else + val |= PROC_CMD_SGMII_MAC; + + ret = vsc8584_cmd(phydev, val); + if (ret) + return ret; + + usleep_range(10000, 20000); + + /* Disable SerDes for 100Base-FX */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(vsc8531->addr) | + PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX); + if (ret) + return ret; + + /* Disable SerDes for 1000Base-X */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(vsc8531->addr) | + PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); + if (ret) + return ret; + + return vsc85xx_sd6g_config_v2(phydev); +} + +static int vsc8574_config_host_serdes(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + int ret; + u16 val; ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO); if (ret) - goto err; + return ret; val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); val &= ~MAC_CFG_MASK; @@ -1598,17 +1641,17 @@ static int vsc8584_config_init(struct phy_device *phydev) val |= MAC_CFG_RGMII; } else { ret = -EINVAL; - goto err; + return ret; } ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); if (ret) - goto err; + return ret; ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); if (ret) - goto err; + return ret; if (!phy_interface_is_rgmii(phydev)) { val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | @@ -1620,7 +1663,7 @@ static int vsc8584_config_init(struct phy_device *phydev) ret = vsc8584_cmd(phydev, val); if (ret) - goto err; + return ret; usleep_range(10000, 20000); } @@ -1632,16 +1675,78 @@ static int vsc8584_config_init(struct phy_device *phydev) PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX); if (ret) - goto err; + return ret; /* Disable SerDes for 1000Base-X */ - ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | - PROC_CMD_FIBER_PORT(vsc8531->addr) | - PROC_CMD_FIBER_DISABLE | - PROC_CMD_READ_MOD_WRITE_PORT | - PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); - if (ret) - goto err; + return vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(vsc8531->addr) | + PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); +} + +static int vsc8584_config_init(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + 
int ret, i; + u16 val; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + phy_lock_mdio_bus(phydev); + + /* Some parts of the init sequence are identical for every PHY in the + * package. Some parts are modifying the GPIO register bank which is a + * set of registers that are affecting all PHYs, a few resetting the + * microprocessor common to all PHYs. The CRC check responsible of the + * checking the firmware within the 8051 microprocessor can only be + * accessed via the PHY whose internal address in the package is 0. + * All PHYs' interrupts mask register has to be zeroed before enabling + * any PHY's interrupt in this register. + * For all these reasons, we need to do the init sequence once and only + * once whatever is the first PHY in the package that is initialized and + * do the correct init sequence for all PHYs that are package-critical + * in this pre-init function. + */ + if (phy_package_init_once(phydev)) { + /* The following switch statement assumes that the lowest + * nibble of the phy_id_mask is always 0. This works because + * the lowest nibble of the PHY_ID's below are also 0. + */ + WARN_ON(phydev->drv->phy_id_mask & 0xf); + + switch (phydev->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_VSC8504: + case PHY_ID_VSC8552: + case PHY_ID_VSC8572: + case PHY_ID_VSC8574: + ret = vsc8574_config_pre_init(phydev); + if (ret) + goto err; + ret = vsc8574_config_host_serdes(phydev); + if (ret) + goto err; + break; + case PHY_ID_VSC856X: + case PHY_ID_VSC8575: + case PHY_ID_VSC8582: + case PHY_ID_VSC8584: + ret = vsc8584_config_pre_init(phydev); + if (ret) + goto err; + ret = vsc8584_config_host_serdes(phydev); + if (ret) + goto err; + vsc85xx_coma_mode_release(phydev); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + goto err; + } phy_unlock_mdio_bus(phydev); diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c new file mode 100644 index 000000000000..26b9c0d7cb9d --- /dev/null +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0 +/* NXP C45 PHY driver + * Copyright (C) 2021 NXP + * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com> + */ + +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/ethtool_netlink.h> +#include <linux/kernel.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/processor.h> +#include <linux/property.h> + +#define PHY_ID_TJA_1103 0x001BB010 + +#define PMAPMD_B100T1_PMAPMD_CTL 0x0834 +#define B100T1_PMAPMD_CONFIG_EN BIT(15) +#define B100T1_PMAPMD_MASTER BIT(14) +#define MASTER_MODE (B100T1_PMAPMD_CONFIG_EN | \ + B100T1_PMAPMD_MASTER) +#define SLAVE_MODE (B100T1_PMAPMD_CONFIG_EN) + +#define VEND1_DEVICE_CONTROL 0x0040 +#define DEVICE_CONTROL_RESET BIT(15) +#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14) +#define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13) + +#define VEND1_PHY_IRQ_ACK 0x80A0 +#define VEND1_PHY_IRQ_EN 0x80A1 +#define VEND1_PHY_IRQ_STATUS 0x80A2 +#define PHY_IRQ_LINK_EVENT BIT(1) + +#define VEND1_PHY_CONTROL 0x8100 +#define PHY_CONFIG_EN BIT(14) +#define PHY_START_OP BIT(0) + +#define VEND1_PHY_CONFIG 0x8108 +#define PHY_CONFIG_AUTO BIT(0) + +#define VEND1_SIGNAL_QUALITY 0x8320 +#define SQI_VALID BIT(14) +#define SQI_MASK GENMASK(2, 0) +#define MAX_SQI SQI_MASK + +#define VEND1_CABLE_TEST 0x8330 +#define CABLE_TEST_ENABLE BIT(15) +#define CABLE_TEST_START BIT(14) +#define CABLE_TEST_VALID BIT(13) +#define CABLE_TEST_OK 0x00 +#define CABLE_TEST_SHORTED 0x01 +#define CABLE_TEST_OPEN 0x02 +#define 
CABLE_TEST_UNKNOWN 0x07 + +#define VEND1_PORT_CONTROL 0x8040 +#define PORT_CONTROL_EN BIT(14) + +#define VEND1_PORT_INFRA_CONTROL 0xAC00 +#define PORT_INFRA_CONTROL_EN BIT(14) + +#define VEND1_RXID 0xAFCC +#define VEND1_TXID 0xAFCD +#define ID_ENABLE BIT(15) + +#define VEND1_ABILITIES 0xAFC4 +#define RGMII_ID_ABILITY BIT(15) +#define RGMII_ABILITY BIT(14) +#define RMII_ABILITY BIT(10) +#define REVMII_ABILITY BIT(9) +#define MII_ABILITY BIT(8) +#define SGMII_ABILITY BIT(0) + +#define VEND1_MII_BASIC_CONFIG 0xAFC6 +#define MII_BASIC_CONFIG_REV BIT(8) +#define MII_BASIC_CONFIG_SGMII 0x9 +#define MII_BASIC_CONFIG_RGMII 0x7 +#define MII_BASIC_CONFIG_RMII 0x5 +#define MII_BASIC_CONFIG_MII 0x4 + +#define VEND1_SYMBOL_ERROR_COUNTER 0x8350 +#define VEND1_LINK_DROP_COUNTER 0x8352 +#define VEND1_LINK_LOSSES_AND_FAILURES 0x8353 +#define VEND1_R_GOOD_FRAME_CNT 0xA950 +#define VEND1_R_BAD_FRAME_CNT 0xA952 +#define VEND1_R_RXER_FRAME_CNT 0xA954 +#define VEND1_RX_PREAMBLE_COUNT 0xAFCE +#define VEND1_TX_PREAMBLE_COUNT 0xAFCF +#define VEND1_RX_IPG_LENGTH 0xAFD0 +#define VEND1_TX_IPG_LENGTH 0xAFD1 +#define COUNTER_EN BIT(15) + +#define RGMII_PERIOD_PS 8000U +#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360) +#define MIN_ID_PS 1644U +#define MAX_ID_PS 2260U +#define DEFAULT_ID_PS 2000U + +struct nxp_c45_phy { + u32 tx_delay; + u32 rx_delay; +}; + +struct nxp_c45_phy_stats { + const char *name; + u8 mmd; + u16 reg; + u8 off; + u16 mask; +}; + +static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = { + { "phy_symbol_error_cnt", MDIO_MMD_VEND1, + VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) }, + { "phy_link_status_drop_cnt", MDIO_MMD_VEND1, + VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) }, + { "phy_link_availability_drop_cnt", MDIO_MMD_VEND1, + VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) }, + { "phy_link_loss_cnt", MDIO_MMD_VEND1, + VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) }, + { "phy_link_failure_cnt", MDIO_MMD_VEND1, + VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) }, + { "r_good_frame_cnt", MDIO_MMD_VEND1, + VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) }, + { "r_bad_frame_cnt", MDIO_MMD_VEND1, + VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) }, + { "r_rxer_frame_cnt", MDIO_MMD_VEND1, + VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) }, + { "rx_preamble_count", MDIO_MMD_VEND1, + VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) }, + { "tx_preamble_count", MDIO_MMD_VEND1, + VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) }, + { "rx_ipg_length", MDIO_MMD_VEND1, + VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) }, + { "tx_ipg_length", MDIO_MMD_VEND1, + VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) }, +}; + +static int nxp_c45_get_sset_count(struct phy_device *phydev) +{ + return ARRAY_SIZE(nxp_c45_hw_stats); +} + +static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) { + strncpy(data + i * ETH_GSTRING_LEN, + nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN); + } +} + +static void nxp_c45_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + size_t i; + int ret; + + for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) { + ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd, + nxp_c45_hw_stats[i].reg); + if (ret < 0) { + data[i] = U64_MAX; + } else { + data[i] = ret & nxp_c45_hw_stats[i].mask; + data[i] >>= nxp_c45_hw_stats[i].off; + } + } +} + +static int nxp_c45_config_enable(struct phy_device *phydev) +{ + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL, + DEVICE_CONTROL_CONFIG_GLOBAL_EN | + DEVICE_CONTROL_CONFIG_ALL_EN); + 
usleep_range(400, 450); + + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL, + PORT_CONTROL_EN); + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL, + PHY_CONFIG_EN); + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL, + PORT_INFRA_CONTROL_EN); + + return 0; +} + +static int nxp_c45_start_op(struct phy_device *phydev) +{ + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL, + PHY_START_OP); +} + +static int nxp_c45_config_intr(struct phy_device *phydev) +{ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT); + else + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT); +} + +static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev) +{ + irqreturn_t ret = IRQ_NONE; + int irq; + + irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS); + if (irq & PHY_IRQ_LINK_EVENT) { + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK, + PHY_IRQ_LINK_EVENT); + phy_trigger_machine(phydev); + ret = IRQ_HANDLED; + } + + return ret; +} + +static int nxp_c45_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL, + DEVICE_CONTROL_RESET); + if (ret) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + VEND1_DEVICE_CONTROL, ret, + !(ret & DEVICE_CONTROL_RESET), 20000, + 240000, false); +} + +static int nxp_c45_cable_test_start(struct phy_device *phydev) +{ + return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST, + CABLE_TEST_ENABLE | CABLE_TEST_START); +} + +static int nxp_c45_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret; + u8 cable_test_result; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST); + if (!(ret & CABLE_TEST_VALID)) { + *finished = false; + return 0; + } + + *finished = true; + cable_test_result = ret & GENMASK(2, 0); + + switch (cable_test_result) { + case CABLE_TEST_OK: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_OK); + break; + case CABLE_TEST_SHORTED: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT); + break; + case CABLE_TEST_OPEN: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_OPEN); + break; + default: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); + } + + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST, + CABLE_TEST_ENABLE); + + return nxp_c45_start_op(phydev); +} + +static int nxp_c45_setup_master_slave(struct phy_device *phydev) +{ + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL, + MASTER_MODE); + break; + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_SLAVE_FORCE: + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL, + SLAVE_MODE); + break; + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + return 0; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int nxp_c45_read_master_slave(struct phy_device *phydev) +{ + int reg; + + phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + + reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, 
PMAPMD_B100T1_PMAPMD_CTL); + if (reg < 0) + return reg; + + if (reg & B100T1_PMAPMD_MASTER) { + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE; + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + } else { + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE; + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + } + + return 0; +} + +static int nxp_c45_config_aneg(struct phy_device *phydev) +{ + return nxp_c45_setup_master_slave(phydev); +} + +static int nxp_c45_read_status(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_read_status(phydev); + if (ret) + return ret; + + ret = nxp_c45_read_master_slave(phydev); + if (ret) + return ret; + + return 0; +} + +static int nxp_c45_get_sqi(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY); + if (!(reg & SQI_VALID)) + return -EINVAL; + + reg &= SQI_MASK; + + return reg; +} + +static int nxp_c45_get_sqi_max(struct phy_device *phydev) +{ + return MAX_SQI; +} + +static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay) +{ + if (delay < MIN_ID_PS) { + phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS); + return -EINVAL; + } + + if (delay > MAX_ID_PS) { + phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS); + return -EINVAL; + } + + return 0; +} + +static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw) +{ + /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9. + * To avoid floating point operations we'll multiply by 10 + * and get 1 decimal point precision. + */ + phase_offset_raw *= 10; + phase_offset_raw -= 738; + return div_u64(phase_offset_raw, 9); +} + +static void nxp_c45_disable_delays(struct phy_device *phydev) +{ + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE); +} + +static void nxp_c45_set_delays(struct phy_device *phydev) +{ + struct nxp_c45_phy *priv = phydev->priv; + u64 tx_delay = priv->tx_delay; + u64 rx_delay = priv->rx_delay; + u64 degree; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + degree = div_u64(tx_delay, PS_PER_DEGREE); + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, + ID_ENABLE | nxp_c45_get_phase_shift(degree)); + } else { + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, + ID_ENABLE); + } + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + degree = div_u64(rx_delay, PS_PER_DEGREE); + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, + ID_ENABLE | nxp_c45_get_phase_shift(degree)); + } else { + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, + ID_ENABLE); + } +} + +static int nxp_c45_get_delays(struct phy_device *phydev) +{ + struct nxp_c45_phy *priv = phydev->priv; + int ret; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + ret = device_property_read_u32(&phydev->mdio.dev, + "tx-internal-delay-ps", + &priv->tx_delay); + if (ret) + priv->tx_delay = DEFAULT_ID_PS; + + ret = nxp_c45_check_delay(phydev, priv->tx_delay); + if (ret) { + phydev_err(phydev, + "tx-internal-delay-ps invalid value\n"); + return ret; + } + } + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + ret = device_property_read_u32(&phydev->mdio.dev, + "rx-internal-delay-ps", + &priv->rx_delay); + if (ret) + priv->rx_delay = DEFAULT_ID_PS; + + ret = 
nxp_c45_check_delay(phydev, priv->rx_delay); + if (ret) { + phydev_err(phydev, + "rx-internal-delay-ps invalid value\n"); + return ret; + } + } + + return 0; +} + +static int nxp_c45_set_phy_mode(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES); + phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret); + + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII: + if (!(ret & RGMII_ABILITY)) { + phydev_err(phydev, "rgmii mode not supported\n"); + return -EINVAL; + } + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, + MII_BASIC_CONFIG_RGMII); + nxp_c45_disable_delays(phydev); + break; + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + if (!(ret & RGMII_ID_ABILITY)) { + phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n"); + return -EINVAL; + } + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, + MII_BASIC_CONFIG_RGMII); + ret = nxp_c45_get_delays(phydev); + if (ret) + return ret; + + nxp_c45_set_delays(phydev); + break; + case PHY_INTERFACE_MODE_MII: + if (!(ret & MII_ABILITY)) { + phydev_err(phydev, "mii mode not supported\n"); + return -EINVAL; + } + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, + MII_BASIC_CONFIG_MII); + break; + case PHY_INTERFACE_MODE_REVMII: + if (!(ret & REVMII_ABILITY)) { + phydev_err(phydev, "rev-mii mode not supported\n"); + return -EINVAL; + } + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, + MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV); + break; + case PHY_INTERFACE_MODE_RMII: + if (!(ret & RMII_ABILITY)) { + phydev_err(phydev, "rmii mode not supported\n"); + return -EINVAL; + } + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, + MII_BASIC_CONFIG_RMII); + break; + case PHY_INTERFACE_MODE_SGMII: + if (!(ret & SGMII_ABILITY)) { + phydev_err(phydev, "sgmii mode not supported\n"); + return -EINVAL; + } + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, + MII_BASIC_CONFIG_SGMII); + break; + case PHY_INTERFACE_MODE_INTERNAL: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int nxp_c45_config_init(struct phy_device *phydev) +{ + int ret; + + ret = nxp_c45_config_enable(phydev); + if (ret) { + phydev_err(phydev, "Failed to enable config\n"); + return ret; + } + + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG, + PHY_CONFIG_AUTO); + + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH, + COUNTER_EN); + + ret = nxp_c45_set_phy_mode(phydev); + if (ret) + return ret; + + phydev->autoneg = AUTONEG_DISABLE; + + return nxp_c45_start_op(phydev); +} + +static int nxp_c45_probe(struct phy_device *phydev) +{ + struct nxp_c45_phy *priv; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return 0; +} + +static struct phy_driver nxp_c45_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), + .name = "NXP C45 TJA1103", + .features = PHY_BASIC_T1_FEATURES, + .probe = nxp_c45_probe, + .soft_reset = nxp_c45_soft_reset, + .config_aneg = nxp_c45_config_aneg, + .config_init = nxp_c45_config_init, + 
.config_intr = nxp_c45_config_intr, + .handle_interrupt = nxp_c45_handle_interrupt, + .read_status = nxp_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = genphy_c45_pma_resume, + .get_sset_count = nxp_c45_get_sset_count, + .get_strings = nxp_c45_get_strings, + .get_stats = nxp_c45_get_stats, + .cable_test_start = nxp_c45_cable_test_start, + .cable_test_get_status = nxp_c45_cable_test_get_status, + .set_loopback = genphy_c45_loopback, + .get_sqi = nxp_c45_get_sqi, + .get_sqi_max = nxp_c45_get_sqi_max, + }, +}; + +module_phy_driver(nxp_c45_driver); + +static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) }, + { /*sentinel*/ }, +}; + +MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl); + +MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>"); +MODULE_DESCRIPTION("NXP C45 PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 077f2929c45e..f4816b7d31b3 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -9,6 +9,49 @@ #include <linux/phy.h> /** + * genphy_c45_pma_can_sleep - checks if the PMA have sleep support + * @phydev: target phy_device struct + */ +static bool genphy_c45_pma_can_sleep(struct phy_device *phydev) +{ + int stat1; + + stat1 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1); + if (stat1 < 0) + return false; + + return !!(stat1 & MDIO_STAT1_LPOWERABLE); +} + +/** + * genphy_c45_pma_resume - wakes up the PMA module + * @phydev: target phy_device struct + */ +int genphy_c45_pma_resume(struct phy_device *phydev) +{ + if (!genphy_c45_pma_can_sleep(phydev)) + return -EOPNOTSUPP; + + return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_resume); + +/** + * genphy_c45_pma_suspend - suspends the PMA module + * @phydev: target phy_device struct + */ +int genphy_c45_pma_suspend(struct phy_device *phydev) +{ + if (!genphy_c45_pma_can_sleep(phydev)) + return -EOPNOTSUPP; + + return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_suspend); + +/** * genphy_c45_pma_setup_forced - configures a forced speed * @phydev: target phy_device struct */ @@ -560,6 +603,14 @@ int gen10g_config_aneg(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(gen10g_config_aneg); +int genphy_c45_loopback(struct phy_device *phydev, bool enable) +{ + return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, + MDIO_PCS_CTRL1_LOOPBACK, + enable ? 
MDIO_PCS_CTRL1_LOOPBACK : 0); +} +EXPORT_SYMBOL_GPL(genphy_c45_loopback); + struct phy_driver genphy_c45_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index fc2e7cb5b2e5..1f0512e39c65 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -701,7 +701,7 @@ out: } EXPORT_SYMBOL(phy_start_cable_test_tdr); -static int phy_config_aneg(struct phy_device *phydev) +int phy_config_aneg(struct phy_device *phydev) { if (phydev->drv->config_aneg) return phydev->drv->config_aneg(phydev); @@ -714,6 +714,7 @@ static int phy_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +EXPORT_SYMBOL(phy_config_aneg); /** * phy_check_link_status - check link status and set state accordingly diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index cc38e326405a..0a2d8bedf73d 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); + if (phydev->mac_managed_pm) + return 0; + /* We must stop the state machine manually, otherwise it stops out of * control, possibly with the phydev->lock held. Upon resume, netdev * may call phy routines that try to grab the same lock, and that may @@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev) struct phy_device *phydev = to_phy_device(dev); int ret; + if (phydev->mac_managed_pm) + return 0; + if (!phydev->suspended_by_mdio_bus) goto no_resume; @@ -512,10 +518,21 @@ phy_has_fixups_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(phy_has_fixups); +static ssize_t phy_dev_flags_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "0x%08x\n", phydev->dev_flags); +} +static DEVICE_ATTR_RO(phy_dev_flags); + static struct attribute *phy_dev_attrs[] = { &dev_attr_phy_id.attr, &dev_attr_phy_interface.attr, &dev_attr_phy_has_fixups.attr, + &dev_attr_phy_dev_flags.attr, NULL, }; ATTRIBUTE_GROUPS(phy_dev); @@ -1760,6 +1777,9 @@ int phy_loopback(struct phy_device *phydev, bool enable) struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; + if (!phydrv) + return -ENODEV; + mutex_lock(&phydev->lock); if (enable && phydev->loopback_enabled) { @@ -1772,10 +1792,10 @@ int phy_loopback(struct phy_device *phydev, bool enable) goto out; } - if (phydev->drv && phydrv->set_loopback) + if (phydrv->set_loopback) ret = phydrv->set_loopback(phydev, enable); else - ret = -EOPNOTSUPP; + ret = genphy_loopback(phydev, enable); if (ret) goto out; @@ -2545,8 +2565,32 @@ EXPORT_SYMBOL(genphy_resume); int genphy_loopback(struct phy_device *phydev, bool enable) { - return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, - enable ? 
BMCR_LOOPBACK : 0); + if (enable) { + u16 val, ctl = BMCR_LOOPBACK; + int ret; + + if (phydev->speed == SPEED_1000) + ctl |= BMCR_SPEED1000; + else if (phydev->speed == SPEED_100) + ctl |= BMCR_SPEED100; + + if (phydev->duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + + phy_modify(phydev, MII_BMCR, ~0, ctl); + + ret = phy_read_poll_timeout(phydev, MII_BMSR, val, + val & BMSR_LSTATUS, + 5000, 500000, true); + if (ret) + return ret; + } else { + phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0); + + phy_config_aneg(phydev); + } + + return 0; } EXPORT_SYMBOL(genphy_loopback); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index dc2800beacc3..96d8e88b4e46 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -271,8 +271,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode) pl->cfg_link_an_mode = MLO_AN_FIXED; fwnode_handle_put(dn); - if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 && - strcmp(managed, "in-band-status") == 0) { + if ((fwnode_property_read_string(fwnode, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) || + pl->config->ovr_an_inband) { if (pl->cfg_link_an_mode == MLO_AN_FIXED) { phylink_err(pl, "can't use both fixed-link and in-band-status\n"); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 2e11176c6b94..e61de66e973b 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -556,6 +556,26 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, EXPORT_SYMBOL_GPL(sfp_get_module_eeprom); /** + * sfp_get_module_eeprom_by_page() - Read a page from the SFP module EEPROM + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @page: a &struct ethtool_module_eeprom + * @extack: extack for reporting problems + * + * Read an EEPROM page as specified by the supplied @page. See the + * documentation for &struct ethtool_module_eeprom for the page to be read. + * + * Returns 0 on success or a negative errno number. 
More error + * information might be provided via extack + */ +int sfp_get_module_eeprom_by_page(struct sfp_bus *bus, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + return bus->socket_ops->module_eeprom_by_page(bus->sfp, page, extack); +} +EXPORT_SYMBOL_GPL(sfp_get_module_eeprom_by_page); + +/** * sfp_upstream_start() - Inform the SFP that the network device is up * @bus: a pointer to the &struct sfp_bus structure for the sfp module * diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 7998acc689b7..37f722c763d7 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2330,6 +2330,30 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, return 0; } +static int sfp_module_eeprom_by_page(struct sfp *sfp, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + if (page->bank) { + NL_SET_ERR_MSG(extack, "Banks not supported"); + return -EOPNOTSUPP; + } + + if (page->page) { + NL_SET_ERR_MSG(extack, "Only page 0 supported"); + return -EOPNOTSUPP; + } + + if (page->i2c_address != 0x50 && + page->i2c_address != 0x51) { + NL_SET_ERR_MSG(extack, "Only address 0x50 and 0x51 supported"); + return -EOPNOTSUPP; + } + + return sfp_read(sfp, page->i2c_address == 0x51, page->offset, + page->data, page->length); +}; + static const struct sfp_socket_ops sfp_module_ops = { .attach = sfp_attach, .detach = sfp_detach, @@ -2337,6 +2361,7 @@ static const struct sfp_socket_ops sfp_module_ops = { .stop = sfp_stop, .module_info = sfp_module_info, .module_eeprom = sfp_module_eeprom, + .module_eeprom_by_page = sfp_module_eeprom_by_page, }; static void sfp_timeout(struct work_struct *work) diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index b83f70526270..27226535c72b 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -14,6 +14,9 @@ struct sfp_socket_ops { int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); int (*module_eeprom)(struct sfp *sfp, struct ethtool_eeprom *ee, u8 *data); + int (*module_eeprom_by_page)(struct sfp *sfp, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); }; int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index ddb78fb4d6dc..d8cac02a79b9 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } -static int lan87xx_config_aneg_ext(struct phy_device *phydev) +static int lan95xx_config_aneg_ext(struct phy_device *phydev) { int rc; + if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */ + return lan87xx_config_aneg(phydev); + /* Extend Manual AutoMDIX timer */ rc = phy_read(phydev, PHY_EDPD_CONFIG); if (rc < 0) @@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = { .read_status = lan87xx_read_status, .config_init = smsc_phy_config_init, .soft_reset = smsc_phy_reset, - .config_aneg = lan87xx_config_aneg_ext, + .config_aneg = lan95xx_config_aneg_ext, /* IRQ related */ .config_intr = smsc_phy_config_intr, diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 4406b353123e..e26cf91bdec2 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -516,6 +516,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev, *data_p |= (c0 << 1) & 0xf0; write_data (dev, 0x00); /* send ACK */ *ns_p = PLIP_NB_BEGIN; + break; case PLIP_NB_2: break; } @@ -808,6 +809,7 @@ 
plip_send_packet(struct net_device *dev, struct net_local *nl, return HS_TIMEOUT; } } + break; case PLIP_PK_LENGTH_LSB: if (plip_send(nibble_timeout, dev, diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index c457f849e553..e6d48e5c65a3 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -279,7 +279,6 @@ static void z_decomp_free(void *arg) struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg; if (state) { - zlib_inflateEnd(&state->strm); vfree(state->strm.workspace); kfree(state); } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index d445ecb1d0c7..930e49ef15f6 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1560,12 +1560,34 @@ static void ppp_dev_priv_destructor(struct net_device *dev) ppp_destroy_interface(ppp); } +static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct ppp *ppp = netdev_priv(ctx->dev); + struct ppp_channel *chan; + struct channel *pch; + + if (ppp->flags & SC_MULTILINK) + return -EOPNOTSUPP; + + if (list_empty(&ppp->channels)) + return -ENODEV; + + pch = list_first_entry(&ppp->channels, struct channel, clist); + chan = pch->chan; + if (!chan->ops->fill_forward_path) + return -EOPNOTSUPP; + + return chan->ops->fill_forward_path(ctx, path, chan); +} + static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, .ndo_start_xmit = ppp_start_xmit, .ndo_do_ioctl = ppp_net_ioctl, .ndo_get_stats64 = ppp_get_stats64, + .ndo_fill_forward_path = ppp_fill_forward_path, }; static struct device_type ppp_type = { diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index d7f50b835050..3619520340b7 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -25,7 +25,7 @@ * in pppoe_release. * 051000 : Initialization cleanup. * 111100 : Fix recvmsg. - * 050101 : Fix PADT procesing. + * 050101 : Fix PADT processing. * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey) * 170701 : Do not lock_sock with rwlock held. 
(DaveM) * Ignore discovery frames if user has socket @@ -96,7 +96,7 @@ struct pppoe_net { * we could use _single_ hash table for all * nets by injecting net id into the hash but * it would increase hash chains and add - * a few additional math comparations messy + * a few additional math comparisons messy * as well, moreover in case of SMP less locking * controversy here */ @@ -972,8 +972,31 @@ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) return __pppoe_xmit(sk, skb); } +static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path, + const struct ppp_channel *chan) +{ + struct sock *sk = (struct sock *)chan->private; + struct pppox_sock *po = pppox_sk(sk); + struct net_device *dev = po->pppoe_dev; + + if (sock_flag(sk, SOCK_DEAD) || + !(sk->sk_state & PPPOX_CONNECTED) || !dev) + return -1; + + path->type = DEV_PATH_PPPOE; + path->encap.proto = htons(ETH_P_PPP_SES); + path->encap.id = be16_to_cpu(po->num); + memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN); + path->dev = ctx->dev; + ctx->dev = dev; + + return 0; +} + static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, + .fill_forward_path = pppoe_fill_forward_path, }; static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 4cf38be26dc9..84f832806313 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1189,8 +1189,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n, struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; u32 numqueues; - int drops = 0; - int cnt = n; + int nxmit = 0; int i; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -1220,9 +1219,9 @@ resample: if (__ptr_ring_produce(&tfile->tx_ring, frame)) { atomic_long_inc(&dev->tx_dropped); - xdp_return_frame_rx_napi(xdp); - drops++; + break; } + nxmit++; } spin_unlock(&tfile->tx_ring.producer_lock); @@ -1230,17 +1229,21 @@ resample: __tun_xdp_flush_tfile(tfile); rcu_read_unlock(); - return cnt - drops; + return nxmit; } static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); + int nxmit; if (unlikely(!frame)) return -EOVERFLOW; - return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); + nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); + if (!nxmit) + xdp_return_frame_rx_napi(frame); + return nxmit; } static const struct net_device_ops tap_netdev_ops = { @@ -3005,7 +3008,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, return open_related_ns(&net->ns, get_net_ns); } - ret = 0; rtnl_lock(); tun = tun_get(tfile); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 6e13d8165852..19a8fafb8f04 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -125,8 +125,8 @@ static const struct ethtool_ops ax88172_ethtool_ops = { .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static void ax88172_set_multicast(struct net_device *net) @@ -291,8 +291,8 @@ static const struct ethtool_ops ax88772_ethtool_ops = { .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + 
.get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int ax88772_link_reset(struct usbnet *dev) @@ -782,8 +782,8 @@ static const struct ethtool_ops ax88178_ethtool_ops = { .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int marvell_phy_init(struct usbnet *dev) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index d650b39b6e5d..c1316718304d 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, int ret; if (2 == size) { - u16 buf; + u16 buf = 0; ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); le16_to_cpus(&buf); *((u16 *)data) = buf; } else if (4 == size) { - u32 buf; + u32 buf = 0; ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); le32_to_cpus(&buf); *((u32 *)data) = buf; @@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev) { u8 mac[ETH_ALEN]; + memset(mac, 0, sizeof(mac)); + /* Maybe the boot loader passed the MAC address via device tree */ if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) { netif_dbg(dev, ifup, dev->net, diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a9b551028659..7eb0109e9baa 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -92,6 +92,18 @@ void usbnet_cdc_update_filter(struct usbnet *dev) } EXPORT_SYMBOL_GPL(usbnet_cdc_update_filter); +/* We need to override usbnet_*_link_ksettings in bind() */ +static const struct ethtool_ops cdc_ether_ethtool_ops = { + .get_link = usbnet_get_link, + .nway_reset = usbnet_nway_reset, + .get_drvinfo = usbnet_get_drvinfo, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = usbnet_get_link_ksettings_internal, + .set_link_ksettings = NULL, +}; + /* probes control interface, claims data interface, collects the bulk * endpoints, activates data interface (if needed), maybe sets MTU. * all pure cdc, except for certain firmware workarounds, and knowing @@ -310,6 +322,9 @@ skip: return -ENODEV; } + /* override ethtool_ops */ + dev->net->ethtool_ops = &cdc_ether_ethtool_ops; + return 0; bad_desc: @@ -379,12 +394,10 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind); * (by Brad Hards) talked with, with more functionality. 
*/ -static void dumpspeed(struct usbnet *dev, __le32 *speeds) +static void speed_change(struct usbnet *dev, __le32 *speeds) { - netif_info(dev, timer, dev->net, - "link speeds: %u kbps up, %u kbps down\n", - __le32_to_cpu(speeds[0]) / 1000, - __le32_to_cpu(speeds[1]) / 1000); + dev->tx_speed = __le32_to_cpu(speeds[0]); + dev->rx_speed = __le32_to_cpu(speeds[1]); } void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) @@ -396,7 +409,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) /* SPEED_CHANGE can get split into two 8-byte packets */ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { - dumpspeed(dev, (__le32 *) urb->transfer_buffer); + speed_change(dev, (__le32 *) urb->transfer_buffer); return; } @@ -413,7 +426,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) if (urb->actual_length != (sizeof(*event) + 8)) set_bit(EVENT_STS_SPLIT, &dev->flags); else - dumpspeed(dev, (__le32 *) &event[1]); + speed_change(dev, (__le32 *) &event[1]); break; /* USB_CDC_NOTIFY_RESPONSE_AVAILABLE can happen too (e.g. RNDIS), * but there are no standard formats for the response data. diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8acf30115428..b04055fd1b79 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -133,17 +133,17 @@ static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 s static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx); static const struct ethtool_ops cdc_ncm_ethtool_ops = { - .get_link = usbnet_get_link, - .nway_reset = usbnet_nway_reset, - .get_drvinfo = usbnet_get_drvinfo, - .get_msglevel = usbnet_get_msglevel, - .set_msglevel = usbnet_set_msglevel, - .get_ts_info = ethtool_op_get_ts_info, - .get_sset_count = cdc_ncm_get_sset_count, - .get_strings = cdc_ncm_get_strings, - .get_ethtool_stats = cdc_ncm_get_ethtool_stats, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link = usbnet_get_link, + .nway_reset = usbnet_nway_reset, + .get_drvinfo = usbnet_get_drvinfo, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, + .get_sset_count = cdc_ncm_get_sset_count, + .get_strings = cdc_ncm_get_strings, + .get_ethtool_stats = cdc_ncm_get_ethtool_stats, + .get_link_ksettings = usbnet_get_link_ksettings_internal, + .set_link_ksettings = NULL, }; static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx) @@ -920,7 +920,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } - usb_set_intfdata(ctx->data, dev); usb_set_intfdata(ctx->control, dev); if (ctx->ether_desc) { @@ -1826,33 +1825,9 @@ static void cdc_ncm_speed_change(struct usbnet *dev, struct usb_cdc_speed_change *data) { - uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); - uint32_t tx_speed = le32_to_cpu(data->ULBitRate); - - /* if the speed hasn't changed, don't report it. - * RTL8156 shipped before 2021 sends notification about every 32ms. - */ - if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed) - return; - - dev->rx_speed = rx_speed; - dev->tx_speed = tx_speed; - - /* - * Currently the USB-NET API does not support reporting the actual - * device speed. Do print it instead. 
- */ - if ((tx_speed > 1000000) && (rx_speed > 1000000)) { - netif_info(dev, link, dev->net, - "%u mbit/s downlink %u mbit/s uplink\n", - (unsigned int)(rx_speed / 1000000U), - (unsigned int)(tx_speed / 1000000U)); - } else { - netif_info(dev, link, dev->net, - "%u kbit/s downlink %u kbit/s uplink\n", - (unsigned int)(rx_speed / 1000U), - (unsigned int)(tx_speed / 1000U)); - } + /* RTL8156 shipped before 2021 sends notification about every 32ms. */ + dev->rx_speed = le32_to_cpu(data->DLBitRRate); + dev->tx_speed = le32_to_cpu(data->ULBitRate); } static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) @@ -1878,6 +1853,9 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. */ + /* RTL8156 shipped before 2021 sends notification about + * every 32ms. Don't forward notification if state is same. + */ if (netif_carrier_ok(dev->net) != !!event->wValue) usbnet_link_change(dev, !!event->wValue, 0); break; diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index b5d2ac55a874..89cc61d7a675 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -282,8 +282,8 @@ static const struct ethtool_ops dm9601_ethtool_ops = { .get_eeprom_len = dm9601_get_eeprom_len, .get_eeprom = dm9601_get_eeprom, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static void dm9601_set_multicast(struct net_device *net) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9bc58e64b5b7..3ef4b2841402 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); - kref_put(&serial_table[i]->ref, hso_serial_ref_free); + kref_put(&serial->parent->ref, hso_serial_ref_free); } } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index e81c5699c952..6acc5e904518 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2,7 +2,6 @@ /* * Copyright (C) 2015 Microchip Technology */ -#include <linux/version.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index fc512b780d15..9f9352a4522f 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -452,8 +452,8 @@ static const struct ethtool_ops mcs7830_ethtool_ops = { .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static const struct net_device_ops mcs7830_netdev_ops = { diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 20fb5638ac65..136ea06540ff 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -29,7 +29,7 @@ #include <linux/usb/r8152.h> /* Information for net-next */ -#define NETNEXT_VERSION "11" +#define NETNEXT_VERSION "12" /* Information for net */ #define NET_VERSION "11" @@ -43,10 +43,14 @@ #define PLA_IDR 0xc000 #define PLA_RCR 0xc010 
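The cdc_ether and cdc_ncm hunks above stop logging USB_CDC_NOTIFY_SPEED_CHANGE events and instead cache the device-reported rates in dev->tx_speed and dev->rx_speed, pointing ethtool at usbnet_get_link_ksettings_internal (whose body is not part of this diff). A minimal sketch of that reporting pattern, under the assumption that the cached values are in bits per second; the function name below is hypothetical, not the real usbnet helper:

/* Hypothetical sketch only: exposes cached tx/rx speeds through
 * .get_link_ksettings for devices without an MII PHY to query.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/usb/usbnet.h>

static int example_usbnet_get_link_ksettings(struct net_device *net,
					     struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	/* No PHY to interrogate; report only what the device notified.
	 * Assumes rx_speed/tx_speed hold bits per second, as stored by
	 * the speed_change()/cdc_ncm_speed_change() hunks above.
	 */
	cmd->base.speed = dev->rx_speed ? dev->rx_speed / 1000000 : SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

Drivers that do have a real MII PHY (asix, dm9601, mcs7830) keep the MII-backed path and are simply switched to the renamed usbnet_{get,set}_link_ksettings_mii helpers in the hunks above.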
+#define PLA_RCR1 0xc012 #define PLA_RMS 0xc016 #define PLA_RXFIFO_CTRL0 0xc0a0 +#define PLA_RXFIFO_FULL 0xc0a2 #define PLA_RXFIFO_CTRL1 0xc0a4 +#define PLA_RX_FIFO_FULL 0xc0a6 #define PLA_RXFIFO_CTRL2 0xc0a8 +#define PLA_RX_FIFO_EMPTY 0xc0aa #define PLA_DMY_REG0 0xc0b0 #define PLA_FMC 0xc0b4 #define PLA_CFG_WOL 0xc0b6 @@ -63,6 +67,8 @@ #define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */ #define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */ #define PLA_EXTRA_STATUS 0xd398 +#define PLA_GPHY_CTRL 0xd3ae +#define PLA_POL_GPIO_CTRL 0xdc6a #define PLA_EFUSE_DATA 0xdd00 #define PLA_EFUSE_CMD 0xdd02 #define PLA_LEDSEL 0xdd90 @@ -72,6 +78,8 @@ #define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 +#define PLA_EEE_TXTWSYS 0xe04c +#define PLA_EEE_TXTWSYS_2P5G 0xe058 #define PLA_EEEP_CR 0xe080 #define PLA_MAC_PWR_CTRL 0xe0c0 #define PLA_MAC_PWR_CTRL2 0xe0ca @@ -82,6 +90,7 @@ #define PLA_TCR1 0xe612 #define PLA_MTPS 0xe615 #define PLA_TXFIFO_CTRL 0xe618 +#define PLA_TXFIFO_FULL 0xe61a #define PLA_RSTTALLY 0xe800 #define PLA_CR 0xe813 #define PLA_CRWECR 0xe81c @@ -98,6 +107,7 @@ #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 #define PLA_CONFIG6 0xe90a /* CONFIG6 */ +#define PLA_USB_CFG 0xe952 #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a @@ -112,6 +122,7 @@ #define USB_USB2PHY 0xb41e #define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 +#define USB_L1_CTRL 0xb45e #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 #define USB_CSR_DUMMY2 0xb466 @@ -122,7 +133,12 @@ #define USB_FW_FIX_EN0 0xcfca #define USB_FW_FIX_EN1 0xcfcc #define USB_LPM_CONFIG 0xcfd8 +#define USB_ECM_OPTION 0xcfee #define USB_CSTMR 0xcfef /* RTL8153A */ +#define USB_MISC_2 0xcfff +#define USB_ECM_OP 0xd26b +#define USB_GPHY_CTRL 0xd284 +#define USB_SPEED_OPTION 0xd32a #define USB_FW_CTRL 0xd334 /* RTL8153B */ #define USB_FC_TIMER 0xd340 #define USB_USB_CTRL 0xd406 @@ -136,16 +152,20 @@ #define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */ #define USB_TX_DMA 0xd434 #define USB_UPT_RXDMA_OWN 0xd437 +#define USB_UPHY3_MDCMDIO 0xd480 #define USB_TOLERANCE 0xd490 #define USB_LPM_CTRL 0xd41a #define USB_BMU_RESET 0xd4b0 +#define USB_BMU_CONFIG 0xd4b4 #define USB_U1U2_TIMER 0xd4da #define USB_FW_TASK 0xd4e8 /* RTL8153B */ +#define USB_RX_AGGR_NUM 0xd4ee #define USB_UPS_CTRL 0xd800 #define USB_POWER_CUT 0xd80a #define USB_MISC_0 0xd81a #define USB_MISC_1 0xd81f #define USB_AFE_CTRL2 0xd824 +#define USB_UPHY_XTAL 0xd826 #define USB_UPS_CFG 0xd842 #define USB_UPS_FLAGS 0xd848 #define USB_WDT1_CTRL 0xe404 @@ -188,6 +208,9 @@ #define OCP_EEE_ABLE 0xa5c4 #define OCP_EEE_ADV 0xa5d0 #define OCP_EEE_LPABLE 0xa5d2 +#define OCP_10GBT_CTRL 0xa5d4 +#define OCP_10GBT_STAT 0xa5d6 +#define OCP_EEE_ADV2 0xa6d4 #define OCP_PHY_STATE 0xa708 /* nway state for 8153 */ #define OCP_PHY_PATCH_STAT 0xb800 #define OCP_PHY_PATCH_CMD 0xb820 @@ -199,6 +222,7 @@ /* SRAM Register */ #define SRAM_GREEN_CFG 0x8011 #define SRAM_LPF_CFG 0x8012 +#define SRAM_GPHY_FW_VER 0x801e #define SRAM_10M_AMP1 0x8080 #define SRAM_10M_AMP2 0x8082 #define SRAM_IMPEDANCE 0x8084 @@ -210,11 +234,19 @@ #define RCR_AM 0x00000004 #define RCR_AB 0x00000008 #define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB) +#define SLOT_EN BIT(11) + +/* PLA_RCR1 */ +#define OUTER_VLAN BIT(7) +#define INNER_VLAN BIT(6) /* PLA_RXFIFO_CTRL0 */ #define RXFIFO_THR1_NORMAL 0x00080002 #define RXFIFO_THR1_OOB 0x01800003 +/* PLA_RXFIFO_FULL */ +#define RXFIFO_FULL_MASK 0xfff + /* PLA_RXFIFO_CTRL1 */ #define RXFIFO_THR2_FULL 
0x00000060 #define RXFIFO_THR2_HIGH 0x00000038 @@ -249,6 +281,9 @@ /* PLA_TCR1 */ #define VERSION_MASK 0x7cf0 +#define IFG_MASK (BIT(3) | BIT(9) | BIT(8)) +#define IFG_144NS BIT(9) +#define IFG_96NS (BIT(9) | BIT(8)) /* PLA_MTPS */ #define MTPS_JUMBO (12 * 1024 / 64) @@ -282,6 +317,7 @@ #define MCU_BORW_EN 0x4000 /* PLA_CPCR */ +#define FLOW_CTRL_EN BIT(0) #define CPCR_RX_VLAN 0x0040 /* PLA_CFG_WOL */ @@ -307,6 +343,10 @@ /* PLA_CONFIG6 */ #define LANWAKE_CLR_EN BIT(0) +/* PLA_USB_CFG */ +#define EN_XG_LIP BIT(1) +#define EN_G_LIP BIT(2) + /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 @@ -330,6 +370,7 @@ /* PLA_MAC_PWR_CTRL2 */ #define EEE_SPDWN_RATIO 0x8007 #define MAC_CLK_SPDWN_EN BIT(15) +#define EEE_SPDWN_RATIO_MASK 0xff /* PLA_MAC_PWR_CTRL3 */ #define PLA_MCU_SPDWN_EN BIT(14) @@ -342,6 +383,7 @@ #define PWRSAVE_SPDWN_EN 0x1000 #define RXDV_SPDWN_EN 0x0800 #define TX10MIDLE_EN 0x0100 +#define IDLE_SPDWN_EN BIT(6) #define TP100_SPDWN_EN 0x0020 #define TP500_SPDWN_EN 0x0010 #define TP1000_SPDWN_EN 0x0008 @@ -382,6 +424,13 @@ #define LINK_CHANGE_FLAG BIT(8) #define POLL_LINK_CHG BIT(0) +/* PLA_GPHY_CTRL */ +#define GPHY_FLASH BIT(1) + +/* PLA_POL_GPIO_CTRL */ +#define DACK_DET_EN BIT(15) +#define POL_GPHY_PATCH BIT(4) + /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 @@ -430,6 +479,9 @@ #define BMU_RESET_EP_IN 0x01 #define BMU_RESET_EP_OUT 0x02 +/* USB_BMU_CONFIG */ +#define ACT_ODMA BIT(1) + /* USB_UPT_RXDMA_OWN */ #define OWN_UPDATE BIT(0) #define OWN_CLEAR BIT(1) @@ -437,27 +489,52 @@ /* USB_FW_TASK */ #define FC_PATCH_TASK BIT(1) +/* USB_RX_AGGR_NUM */ +#define RX_AGGR_NUM_MASK 0x1ff + /* USB_UPS_CTRL */ #define POWER_CUT 0x0100 /* USB_PM_CTRL_STATUS */ #define RESUME_INDICATE 0x0001 +/* USB_ECM_OPTION */ +#define BYPASS_MAC_RESET BIT(5) + /* USB_CSTMR */ #define FORCE_SUPER BIT(0) +/* USB_MISC_2 */ +#define UPS_FORCE_PWR_DOWN BIT(0) + +/* USB_ECM_OP */ +#define EN_ALL_SPEED BIT(0) + +/* USB_GPHY_CTRL */ +#define GPHY_PATCH_DONE BIT(2) +#define BYPASS_FLASH BIT(5) +#define BACKUP_RESTRORE BIT(6) + +/* USB_SPEED_OPTION */ +#define RG_PWRDN_EN BIT(8) +#define ALL_SPEED_OFF BIT(9) + /* USB_FW_CTRL */ #define FLOW_CTRL_PATCH_OPT BIT(1) +#define AUTO_SPEEDUP BIT(3) +#define FLOW_CTRL_PATCH_2 BIT(8) /* USB_FC_TIMER */ #define CTRL_TIMER_EN BIT(15) /* USB_USB_CTRL */ +#define CDC_ECM_EN BIT(3) #define RX_AGG_DISABLE 0x0010 #define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 +#define RX_DETECT8 BIT(3) /* USB_POWER_CUT */ #define PWR_EN 0x0001 @@ -493,8 +570,12 @@ #define SEN_VAL_NORMAL 0xa000 #define SEL_RXIDLE 0x0100 +/* USB_UPHY_XTAL */ +#define OOBS_POLLING BIT(8) + /* USB_UPS_CFG */ #define SAW_CNT_1MS_MASK 0x0fff +#define MID_REVERSE BIT(5) /* RTL8156A */ /* USB_UPS_FLAGS */ #define UPS_FLAGS_R_TUNE BIT(0) @@ -502,6 +583,7 @@ #define UPS_FLAGS_250M_CKDIV BIT(2) #define UPS_FLAGS_EN_ALDPS BIT(3) #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) +#define UPS_FLAGS_SPEED_MASK (0xf << 16) #define ups_flags_speed(x) ((x) << 16) #define UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) @@ -522,6 +604,8 @@ enum spd_duplex { FORCE_10M_FULL, FORCE_100M_HALF, FORCE_100M_FULL, + FORCE_1000M_FULL, + NWAY_2500M_FULL, }; /* OCP_ALDPS_CONFIG */ @@ -586,6 +670,9 @@ enum spd_duplex { #define EN_10M_CLKDIV BIT(11) #define EN_10M_BGOFF 0x0080 +/* OCP_10GBT_CTRL */ +#define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */ + /* OCP_PHY_STATE */ #define TXDIS_STATE 0x01 #define ABD_STATE 0x02 @@ -605,7 +692,8 @@ enum spd_duplex { 
#define EN_EMI_L 0x0040 /* OCP_SYSCLK_CFG */ -#define clk_div_expo(x) (min(x, 5) << 8) +#define sysclk_div_expo(x) (min(x, 5) << 8) +#define clk_div_expo(x) (min(x, 5) << 4) /* SRAM_GREEN_CFG */ #define GREEN_ETH_EN BIT(15) @@ -636,6 +724,11 @@ enum spd_duplex { #define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */ enum rtl_register_content { + _2500bps = BIT(10), + _1250bps = BIT(9), + _500bps = BIT(8), + _tx_flow = BIT(6), + _rx_flow = BIT(5), _1000bps = 0x10, _100bps = 0x08, _10bps = 0x04, @@ -643,6 +736,9 @@ enum rtl_register_content { FULL_DUP = 0x01, }; +#define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS)) +#define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow)) + #define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 @@ -654,15 +750,12 @@ enum rtl_register_content { #define INTR_LINK 0x0004 -#define RTL8153_MAX_PACKET 9216 /* 9K */ -#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \ - ETH_FCS_LEN) #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) -#define RTL8152_NAPI_WEIGHT 64 -#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + ETH_FCS_LEN + \ - sizeof(struct rx_desc) + RX_ALIGN) +#define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) +#define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { @@ -674,8 +767,6 @@ enum rtl8152_flags { PHY_RESET, SCHEDULE_TASKLET, GREEN_ETHERNET, - DELL_TB_RX_AGG_BUG, - LENOVO_MACPASSTHRU, }; #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 @@ -792,9 +883,11 @@ struct r8152 { bool (*in_nway)(struct r8152 *tp); void (*hw_phy_cfg)(struct r8152 *tp); void (*autosuspend_en)(struct r8152 *tp, bool enable); + void (*change_mtu)(struct r8152 *tp); } rtl_ops; struct ups_info { + u32 r_tune:1; u32 _10m_ckdiv:1; u32 _250m_ckdiv:1; u32 aldps:1; @@ -836,7 +929,11 @@ struct r8152 { u32 rx_buf_sz; u32 rx_copybreak; u32 rx_pending; + u32 fc_pause_on, fc_pause_off; + u32 support_2500full:1; + u32 lenovo_macpassthru:1; + u32 dell_tb_rx_agg_bug:1; u16 ocp_base; u16 speed; u16 eee_adv; @@ -871,6 +968,66 @@ struct fw_header { struct fw_block blocks[]; } __packed; +enum rtl8152_fw_flags { + FW_FLAGS_USB = 0, + FW_FLAGS_PLA, + FW_FLAGS_START, + FW_FLAGS_STOP, + FW_FLAGS_NC, + FW_FLAGS_NC1, + FW_FLAGS_NC2, + FW_FLAGS_UC2, + FW_FLAGS_UC, + FW_FLAGS_SPEED_UP, + FW_FLAGS_VER, +}; + +enum rtl8152_fw_fixup_cmd { + FW_FIXUP_AND = 0, + FW_FIXUP_OR, + FW_FIXUP_NOT, + FW_FIXUP_XOR, +}; + +struct fw_phy_set { + __le16 addr; + __le16 data; +} __packed; + +struct fw_phy_speed_up { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 version; + __le16 fw_reg; + __le16 reserved; + char info[]; +} __packed; + +struct fw_phy_ver { + struct fw_block blk_hdr; + struct fw_phy_set ver; + __le32 reserved; +} __packed; + +struct fw_phy_fixup { + struct fw_block blk_hdr; + struct fw_phy_set setting; + __le16 bit_cmd; + __le16 reserved; +} __packed; + +struct fw_phy_union { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + struct fw_phy_set pre_set[2]; + struct fw_phy_set bp[8]; + struct fw_phy_set bp_en; + u8 pre_num; + u8 bp_num; + char info[]; +} __packed; + /** * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. 
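The mtu_to_size()/size_to_mtu() helpers added above only convert between an MTU and the corresponding packet-buffer size by adding or removing the VLAN Ethernet header and the FCS. A standalone worked example of that arithmetic, with the kernel constants (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4) restated locally so it builds outside the driver:

/* Worked example of the mtu_to_size()/size_to_mtu() arithmetic.
 * VLAN_ETH_HLEN is 14 (Ethernet header) + 4 (802.1Q tag) = 18 bytes,
 * ETH_FCS_LEN is 4 bytes, matching the kernel headers.
 */
#include <assert.h>

#define VLAN_ETH_HLEN	18
#define ETH_FCS_LEN	4
#define mtu_to_size(m)	((m) + VLAN_ETH_HLEN + ETH_FCS_LEN)
#define size_to_mtu(s)	((s) - VLAN_ETH_HLEN - ETH_FCS_LEN)

int main(void)
{
	/* A 9216-byte (9K) packet buffer leaves room for an MTU of 9194. */
	assert(size_to_mtu(9216) == 9194);
	/* The two helpers are exact inverses of each other. */
	assert(mtu_to_size(size_to_mtu(9216)) == 9216);
	/* A standard 1500-byte MTU needs a 1522-byte buffer with VLAN + FCS. */
	assert(mtu_to_size(1500) == 1522);
	return 0;
}

On top of this, rx_reserved_size() adds sizeof(struct rx_desc) plus the receive alignment, and RTL_LIMITED_TSO_SIZE becomes size_to_mtu(agg_buf_sz) minus the TX descriptor, as in the redefinitions above.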
* The layout of the firmware block is: @@ -975,6 +1132,15 @@ enum rtl_fw_type { RTL_FW_PHY_START, RTL_FW_PHY_STOP, RTL_FW_PHY_NC, + RTL_FW_PHY_FIXUP, + RTL_FW_PHY_UNION_NC, + RTL_FW_PHY_UNION_NC1, + RTL_FW_PHY_UNION_NC2, + RTL_FW_PHY_UNION_UC2, + RTL_FW_PHY_UNION_UC, + RTL_FW_PHY_UNION_MISC, + RTL_FW_PHY_SPEED_UP, + RTL_FW_PHY_VER, }; enum rtl_version { @@ -988,6 +1154,15 @@ enum rtl_version { RTL_VER_07, RTL_VER_08, RTL_VER_09, + + RTL_TEST_01, + RTL_VER_10, + RTL_VER_11, + RTL_VER_12, + RTL_VER_13, + RTL_VER_14, + RTL_VER_15, + RTL_VER_MAX }; @@ -1003,6 +1178,7 @@ enum tx_csum_stat { #define RTL_ADVERTISED_100_FULL BIT(3) #define RTL_ADVERTISED_1000_HALF BIT(4) #define RTL_ADVERTISED_1000_FULL BIT(5) +#define RTL_ADVERTISED_2500_FULL BIT(6) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The RTL chips use a 64 element hash table based on the Ethernet CRC. @@ -1010,8 +1186,7 @@ enum tx_csum_stat { static const int multicast_filter_limit = 32; static unsigned int agg_buf_sz = 16384; -#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \ - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) @@ -1419,7 +1594,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) acpi_object_type mac_obj_type; int mac_strlen; - if (test_bit(LENOVO_MACPASSTHRU, &tp->flags)) { + if (tp->lenovo_macpassthru) { mac_obj_name = "\\MACA"; mac_obj_type = ACPI_TYPE_STRING; mac_strlen = 0x16; @@ -2108,7 +2283,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); - if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags)) + if (tp->dell_tb_rx_agg_bug) break; } @@ -2597,7 +2772,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, static void r8152b_reset_packet_filter(struct r8152 *tp) { - u32 ocp_data; + u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); ocp_data &= ~FMC_FCR_MCU_EN; @@ -2608,28 +2783,58 @@ static void r8152b_reset_packet_filter(struct r8152 *tp) static void rtl8152_nic_reset(struct r8152 *tp) { - int i; + u32 ocp_data; + int i; - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); + switch (tp->version) { + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data &= ~CR_TE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); + ocp_data &= ~BMU_RESET_EP_IN; + ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data |= CDC_ECM_EN; + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data &= ~CR_RE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); + ocp_data |= BMU_RESET_EP_IN; + ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~CDC_ECM_EN; + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + break; - for (i = 0; i < 1000; i++) { - if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) - break; - usleep_range(100, 400); + default: + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); + + for (i = 0; i < 1000; i++) { + if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & 
CR_RST)) + break; + usleep_range(100, 400); + } + break; } } static void set_tx_qlen(struct r8152 *tp) { - struct net_device *netdev = tp->netdev; - - tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + - sizeof(struct tx_desc)); + tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); } -static inline u8 rtl8152_get_speed(struct r8152 *tp) +static inline u16 rtl8152_get_speed(struct r8152 *tp) { - return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); + return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); } static void rtl_eee_plus_en(struct r8152 *tp, bool enable) @@ -2747,6 +2952,29 @@ static int rtl_stop_rx(struct r8152 *tp) return 0; } +static void rtl_set_ifg(struct r8152 *tp, u16 speed) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); + ocp_data &= ~IFG_MASK; + if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { + ocp_data |= IFG_144NS; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data &= ~TX10MIDLE_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } else { + ocp_data |= IFG_96NS; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= TX10MIDLE_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } +} + static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) { ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, @@ -2766,6 +2994,7 @@ static int rtl_enable(struct r8152 *tp) switch (tp->version) { case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: r8153b_rx_agg_chg_indicate(tp); break; default: @@ -2803,6 +3032,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: /* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns. 
*/ @@ -2812,6 +3042,18 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) ocp_data); break; + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, + 640 / 8); + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, + ocp_data); + r8153b_rx_agg_chg_indicate(tp); + break; + default: break; } @@ -2831,9 +3073,20 @@ static void r8153_set_rx_early_size(struct r8152 *tp) break; case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, + ocp_data / 8); + r8153b_rx_agg_chg_indicate(tp); + break; default: WARN_ON_ONCE(1); break; @@ -2842,6 +3095,8 @@ static void r8153_set_rx_early_size(struct r8152 *tp) static int rtl8153_enable(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV; @@ -2850,15 +3105,20 @@ static int rtl8153_enable(struct r8152 *tp) r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); - if (tp->version == RTL_VER_09) { - u32 ocp_data; + rtl_set_ifg(tp, rtl8152_get_speed(tp)); + switch (tp->version) { + case RTL_VER_09: + case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + break; + default: + break; } return rtl_enable(tp); @@ -2923,12 +3183,40 @@ static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) { u32 ocp_data; - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - if (enable) - ocp_data |= CPCR_RX_VLAN; - else - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_07: + case RTL_VER_08: + case RTL_VER_09: + case RTL_VER_14: + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + if (enable) + ocp_data |= CPCR_RX_VLAN; + else + ocp_data &= ~CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + break; + + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + default: + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1); + if (enable) + ocp_data |= OUTER_VLAN | INNER_VLAN; + else + ocp_data &= ~(OUTER_VLAN | INNER_VLAN); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data); + break; + } } static int rtl8152_set_features(struct net_device *dev, @@ -3021,6 +3309,40 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) device_set_wakeup_enable(&tp->udev->dev, false); } +static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + + /* MAC clock speed down */ + if (enable) + ocp_data |= MAC_CLK_SPDWN_EN; + else + ocp_data &= ~MAC_CLK_SPDWN_EN; + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); +} + +static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + /* MAC clock speed down */ + if (enable) { + /* aldps_spdwn_ratio, tp10_spdwn_ratio */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, + 0x0403); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data &= 
~EEE_SPDWN_RATIO_MASK; + ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + } else { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data &= ~MAC_CLK_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + } +} + static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; @@ -3080,6 +3402,9 @@ static void r8153b_ups_flags(struct r8152 *tp) if (tp->ups_info.eee_cmod_lv) ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; + if (tp->ups_info.r_tune) + ups_flags |= UPS_FLAGS_R_TUNE; + if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; @@ -3130,6 +3455,88 @@ static void r8153b_ups_flags(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } +static void r8156_ups_flags(struct r8152 *tp) +{ + u32 ups_flags = 0; + + if (tp->ups_info.green) + ups_flags |= UPS_FLAGS_EN_GREEN; + + if (tp->ups_info.aldps) + ups_flags |= UPS_FLAGS_EN_ALDPS; + + if (tp->ups_info.eee) + ups_flags |= UPS_FLAGS_EN_EEE; + + if (tp->ups_info.flow_control) + ups_flags |= UPS_FLAGS_EN_FLOW_CTR; + + if (tp->ups_info.eee_ckdiv) + ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; + + if (tp->ups_info._10m_ckdiv) + ups_flags |= UPS_FLAGS_EN_10M_CKDIV; + + if (tp->ups_info.eee_plloff_100) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; + + if (tp->ups_info.eee_plloff_giga) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; + + if (tp->ups_info._250m_ckdiv) + ups_flags |= UPS_FLAGS_250M_CKDIV; + + switch (tp->ups_info.speed_duplex) { + case FORCE_10M_HALF: + ups_flags |= ups_flags_speed(0); + break; + case FORCE_10M_FULL: + ups_flags |= ups_flags_speed(1); + break; + case FORCE_100M_HALF: + ups_flags |= ups_flags_speed(2); + break; + case FORCE_100M_FULL: + ups_flags |= ups_flags_speed(3); + break; + case NWAY_10M_HALF: + ups_flags |= ups_flags_speed(4); + break; + case NWAY_10M_FULL: + ups_flags |= ups_flags_speed(5); + break; + case NWAY_100M_HALF: + ups_flags |= ups_flags_speed(6); + break; + case NWAY_100M_FULL: + ups_flags |= ups_flags_speed(7); + break; + case NWAY_1000M_FULL: + ups_flags |= ups_flags_speed(8); + break; + case NWAY_2500M_FULL: + ups_flags |= ups_flags_speed(9); + break; + default: + break; + } + + switch (tp->ups_info.lite_mode) { + case 1: + ups_flags |= 0 << 5; + break; + case 2: + ups_flags |= 2 << 5; + break; + case 0: + default: + ups_flags |= 1 << 5; + break; + } + + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); +} + static void rtl_green_en(struct r8152 *tp, bool enable) { u16 data; @@ -3193,16 +3600,16 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); - ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); - ocp_data |= BIT(0); - ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data |= UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); - ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); - ocp_data &= ~BIT(0); - ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; @@ -3222,6 +3629,95 @@ static void r8153b_ups_en(struct r8152 *tp, bool 
enable) } } +static void r8153c_ups_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); + + if (enable) { + r8153b_ups_flags(tp); + + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data |= UPS_FORCE_PWR_DOWN; + ocp_data &= ~BIT(7); + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + } else { + ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { + int i; + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + msleep(20); + } + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, + tp->duplex, tp->advertising); + } + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); + ocp_data |= BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + } +} + +static void r8156_ups_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); + + if (enable) { + r8156_ups_flags(tp); + + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data |= UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + switch (tp->version) { + case RTL_VER_13: + case RTL_VER_15: + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL); + ocp_data &= ~OOBS_POLLING; + ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data); + break; + default: + break; + } + } else { + ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, + tp->duplex, tp->advertising); + } + } +} + static void r8153_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; @@ -3351,6 +3847,38 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) } } +static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable) +{ + if (enable) { + r8153_queue_wake(tp, true); + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + rtl_runtime_suspend_enable(tp, true); + r8153c_ups_en(tp, true); + } else { + r8153c_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + r8153b_u1u2en(tp, true); + } +} + +static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) +{ + if (enable) { + r8153_queue_wake(tp, true); + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + rtl_runtime_suspend_enable(tp, true); + } else { + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + r8153_u2p3en(tp, true); + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + } +} + static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@ -3371,14 +3899,19 @@ static void r8153_teredo_off(struct 
r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: + default: /* The bit 0 ~ 7 are relative with teredo settings. They are * W1C (write 1 to clear), so set all 1 to disable it. */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff); break; - - default: - break; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE); @@ -3413,6 +3946,12 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type) break; case RTL_VER_08: case RTL_VER_09: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: default: if (type == MCU_TYPE_USB) { ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0); @@ -3521,6 +4060,162 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) return 0; } +static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy) +{ + u16 fw_offset; + u32 length; + bool rc = false; + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_07: + case RTL_VER_08: + case RTL_VER_09: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_14: + goto out; + case RTL_VER_13: + case RTL_VER_15: + default: + break; + } + + fw_offset = __le16_to_cpu(phy->fw_offset); + length = __le32_to_cpu(phy->blk_hdr.length); + if (fw_offset < sizeof(*phy) || length <= fw_offset) { + dev_err(&tp->intf->dev, "invalid fw_offset\n"); + goto out; + } + + length -= fw_offset; + if (length & 3) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (__le16_to_cpu(phy->fw_reg) != 0x9A00) { + dev_err(&tp->intf->dev, "invalid register to load firmware\n"); + goto out; + } + + rc = true; +out: + return rc; +} + +static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver) +{ + bool rc = false; + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + break; + default: + goto out; + } + + if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) { + dev_err(&tp->intf->dev, "invalid phy ver addr\n"); + goto out; + } + + rc = true; +out: + return rc; +} + +static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix) +{ + bool rc = false; + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + break; + default: + goto out; + } + + if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD || + __le16_to_cpu(fix->setting.data) != BIT(7)) { + dev_err(&tp->intf->dev, "invalid phy fixup\n"); + goto out; + } + + rc = true; +out: + return rc; +} + +static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy) +{ + u16 fw_offset; + u32 length; + bool rc = false; + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + break; + default: + goto out; + } + + fw_offset = __le16_to_cpu(phy->fw_offset); + length = __le32_to_cpu(phy->blk_hdr.length); + if (fw_offset < sizeof(*phy) || length <= fw_offset) { + dev_err(&tp->intf->dev, "invalid fw_offset\n"); + goto out; 
+ } + + length -= fw_offset; + if (length & 1) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (phy->pre_num > 2) { + dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num); + goto out; + } + + if (phy->bp_num > 8) { + dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num); + goto out; + } + + rc = true; +out: + return rc; +} + static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy) { u32 length; @@ -3622,6 +4317,11 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = PLA_BP_EN; @@ -3645,6 +4345,11 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) break; case RTL_VER_08: case RTL_VER_09: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: fw_reg = 0xe600; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP2_EN; @@ -3772,10 +4477,7 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) { const struct firmware *fw = rtl_fw->fw; struct fw_header *fw_hdr = (struct fw_header *)fw->data; - struct fw_mac *pla = NULL, *usb = NULL; - struct fw_phy_patch_key *start = NULL; - struct fw_phy_nc *phy_nc = NULL; - struct fw_block *stop = NULL; + unsigned long fw_flags = 0; long ret = -EFAULT; int i; @@ -3804,50 +4506,56 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) goto fail; goto fw_end; case RTL_FW_PLA: - if (pla) { + if (test_bit(FW_FLAGS_PLA, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PLA firmware encountered"); goto fail; } - pla = (struct fw_mac *)block; - if (!rtl8152_is_fw_mac_ok(tp, pla)) { + if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check PLA firmware failed\n"); goto fail; } + __set_bit(FW_FLAGS_PLA, &fw_flags); break; case RTL_FW_USB: - if (usb) { + if (test_bit(FW_FLAGS_USB, &fw_flags)) { dev_err(&tp->intf->dev, "multiple USB firmware encountered"); goto fail; } - usb = (struct fw_mac *)block; - if (!rtl8152_is_fw_mac_ok(tp, usb)) { + if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check USB firmware failed\n"); goto fail; } + __set_bit(FW_FLAGS_USB, &fw_flags); break; case RTL_FW_PHY_START: - if (start || phy_nc || stop) { + if (test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC, &fw_flags) || + test_bit(FW_FLAGS_NC1, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_START fail\n"); goto fail; } - if (__le32_to_cpu(block->length) != sizeof(*start)) { + if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) { dev_err(&tp->intf->dev, "Invalid length for PHY_START\n"); goto fail; } - - start = (struct fw_phy_patch_key *)block; + __set_bit(FW_FLAGS_START, &fw_flags); break; case RTL_FW_PHY_STOP: - if (stop || !start) { + if (test_bit(FW_FLAGS_STOP, &fw_flags) || + !test_bit(FW_FLAGS_START, &fw_flags)) { dev_err(&tp->intf->dev, "Check PHY_STOP fail\n"); goto fail; @@ -3858,29 +4566,175 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) "Invalid length for PHY_STOP\n"); goto fail; } - - stop = block; + __set_bit(FW_FLAGS_STOP, &fw_flags); break; case RTL_FW_PHY_NC: - if (!start || stop) { + if (!test_bit(FW_FLAGS_START, 
&fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_NC fail\n"); goto fail; } - if (phy_nc) { + if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC encountered\n"); goto fail; } - phy_nc = (struct fw_phy_nc *)block; - if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) { + if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) { dev_err(&tp->intf->dev, "check PHY NC firmware failed\n"); goto fail; } + __set_bit(FW_FLAGS_NC, &fw_flags); + break; + case RTL_FW_PHY_UNION_NC: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC1, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_NC, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_NC, &fw_flags); + break; + case RTL_FW_PHY_UNION_NC1: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n"); + goto fail; + } + if (test_bit(FW_FLAGS_NC1, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_NC1, &fw_flags); + break; + case RTL_FW_PHY_UNION_NC2: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_NC2, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_NC2, &fw_flags); + break; + case RTL_FW_PHY_UNION_UC2: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_UC2, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_UC2, &fw_flags); + break; + case RTL_FW_PHY_UNION_UC: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_UC, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY UC encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_UC, &fw_flags); + break; + case RTL_FW_PHY_UNION_MISC: + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + 
dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n"); + goto fail; + } + break; + case RTL_FW_PHY_FIXUP: + if (!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) { + dev_err(&tp->intf->dev, "check PHY fixup failed\n"); + goto fail; + } + break; + case RTL_FW_PHY_SPEED_UP: + if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY firmware encountered"); + goto fail; + } + + if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) { + dev_err(&tp->intf->dev, "check PHY speed up failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_SPEED_UP, &fw_flags); + break; + case RTL_FW_PHY_VER: + if (test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC, &fw_flags) || + test_bit(FW_FLAGS_NC1, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "Invalid order to set PHY version\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_VER, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY version encountered"); + goto fail; + } + + if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) { + dev_err(&tp->intf->dev, "check PHY version failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_VER, &fw_flags); break; default: dev_warn(&tp->intf->dev, "Unknown type %u is found\n", @@ -3893,7 +4747,7 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) } fw_end: - if ((phy_nc || start) && !stop) { + if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "without PHY_STOP\n"); goto fail; } @@ -3903,6 +4757,143 @@ fail: return ret; } +static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait) +{ + u32 len; + u8 *data; + + if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { + dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); + return; + } + + len = __le32_to_cpu(phy->blk_hdr.length); + len -= __le16_to_cpu(phy->fw_offset); + data = (u8 *)phy + __le16_to_cpu(phy->fw_offset); + + if (rtl_phy_patch_request(tp, true, wait)) + return; + + while (len) { + u32 ocp_data, size; + int i; + + if (len < 2048) + size = len; + else + size = 2048; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL); + ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE; + ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data); + + generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB); + + data += size; + len -= size; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL); + ocp_data |= POL_GPHY_PATCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data); + + for (i = 0; i < 1000; i++) { + if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH)) + break; + } + + if (i == 1000) { + dev_err(&tp->intf->dev, "ram code speedup mode timeout\n"); + break; + } + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); + rtl_phy_patch_request(tp, false, wait); + + if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) + dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); + else + dev_err(&tp->intf->dev, "ram code speedup mode fail\n"); +} + +static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) +{ + u16 ver_addr, ver; + + ver_addr = __le16_to_cpu(phy_ver->ver.addr); + ver = __le16_to_cpu(phy_ver->ver.data); + + if (sram_read(tp, ver_addr) >= ver) { + 
dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); + return 0; + } + + sram_write(tp, ver_addr, ver); + + dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver); + + return ver; +} + +static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) +{ + u16 addr, data; + + addr = __le16_to_cpu(fix->setting.addr); + data = ocp_reg_read(tp, addr); + + switch (__le16_to_cpu(fix->bit_cmd)) { + case FW_FIXUP_AND: + data &= __le16_to_cpu(fix->setting.data); + break; + case FW_FIXUP_OR: + data |= __le16_to_cpu(fix->setting.data); + break; + case FW_FIXUP_NOT: + data &= ~__le16_to_cpu(fix->setting.data); + break; + case FW_FIXUP_XOR: + data ^= __le16_to_cpu(fix->setting.data); + break; + default: + return; + } + + ocp_reg_write(tp, addr, data); + + dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data); +} + +static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy) +{ + __le16 *data; + u32 length; + int i, num; + + num = phy->pre_num; + for (i = 0; i < num; i++) + sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), + __le16_to_cpu(phy->pre_set[i].data)); + + length = __le32_to_cpu(phy->blk_hdr.length); + length -= __le16_to_cpu(phy->fw_offset); + num = length / 2; + data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); + + ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); + for (i = 0; i < num; i++) + ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); + + num = phy->bp_num; + for (i = 0; i < num; i++) + sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data)); + + if (phy->bp_num && phy->bp_en.addr) + sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data)); + + dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); +} + static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) { u16 mode_reg, bp_index; @@ -3956,6 +4947,12 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) return; } + fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); + if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) { + dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB"); + return; + } + rtl_clear_bp(tp, type); /* Enable backup/restore of MACDBG. 
This is required after clearing PLA @@ -3991,7 +4988,6 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) ocp_write_word(tp, type, bp_en_addr, __le16_to_cpu(mac->bp_en_value)); - fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); if (fw_ver_reg) ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg, mac->fw_ver_data); @@ -4006,7 +5002,7 @@ static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) struct fw_header *fw_hdr; struct fw_phy_patch_key *key; u16 key_addr = 0; - int i; + int i, patch_phy = 1; if (IS_ERR_OR_NULL(rtl_fw->fw)) return; @@ -4028,17 +5024,40 @@ static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) rtl8152_fw_mac_apply(tp, (struct fw_mac *)block); break; case RTL_FW_PHY_START: + if (!patch_phy) + break; key = (struct fw_phy_patch_key *)block; key_addr = __le16_to_cpu(key->key_reg); rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); break; case RTL_FW_PHY_STOP: + if (!patch_phy) + break; WARN_ON(!key_addr); rtl_post_ram_code(tp, key_addr, !power_cut); break; case RTL_FW_PHY_NC: rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); break; + case RTL_FW_PHY_VER: + patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block); + break; + case RTL_FW_PHY_UNION_NC: + case RTL_FW_PHY_UNION_NC1: + case RTL_FW_PHY_UNION_NC2: + case RTL_FW_PHY_UNION_UC2: + case RTL_FW_PHY_UNION_UC: + case RTL_FW_PHY_UNION_MISC: + if (patch_phy) + rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block); + break; + case RTL_FW_PHY_FIXUP: + if (patch_phy) + rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block); + break; + case RTL_FW_PHY_SPEED_UP: + rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut); + break; default: break; } @@ -4185,6 +5204,22 @@ static void r8153_eee_en(struct r8152 *tp, bool enable) tp->ups_info.eee = enable; } +static void r8156_eee_en(struct r8152 *tp, bool enable) +{ + u16 config; + + r8153_eee_en(tp, enable); + + config = ocp_reg_read(tp, OCP_EEE_ADV2); + + if (enable) + config |= MDIO_EEE_2_5GT; + else + config &= ~MDIO_EEE_2_5GT; + + ocp_reg_write(tp, OCP_EEE_ADV2, config); +} + static void rtl_eee_enable(struct r8152 *tp, bool enable) { switch (tp->version) { @@ -4206,6 +5241,7 @@ static void rtl_eee_enable(struct r8152 *tp, bool enable) case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: if (enable) { r8153_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); @@ -4214,6 +5250,19 @@ static void rtl_eee_enable(struct r8152 *tp, bool enable) ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + if (enable) { + r8156_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } else { + r8156_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + break; default: break; } @@ -4260,6 +5309,20 @@ static void wait_oob_link_list_ready(struct r8152 *tp) } } +static void r8156b_wait_loading_flash(struct r8152 *tp) +{ + if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) && + !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) { + int i; + + for (i = 0; i < 100; i++) { + if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) + break; + usleep_range(1000, 2000); + } + } +} + static void r8152b_exit_oob(struct r8152 *tp) { u32 ocp_data; @@ -4310,7 +5373,7 @@ static void r8152b_exit_oob(struct r8152 *tp) } /* TX share fifo free credit full threshold */ - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL); + 
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH); @@ -4487,6 +5550,36 @@ static int r8153b_post_firmware_1(struct r8152 *tp) return 0; } +static int r8153c_post_firmware_1(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); + ocp_data |= FLOW_CTRL_PATCH_2; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + + return 0; +} + +static int r8156a_post_firmware_1(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); + ocp_data |= FW_IP_RESET_EN; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); + + /* Modify U3PHY parameter for compatibility issue */ + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e); + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9); + + return 0; +} + static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; @@ -4689,6 +5782,19 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) set_bit(PHY_RESET, &tp->flags); } +static void r8153c_hw_phy_cfg(struct r8152 *tp) +{ + r8153b_hw_phy_cfg(tp); + + tp->ups_info.r_tune = true; +} + +static void rtl8153_change_mtu(struct r8152 *tp) +{ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); +} + static void r8153_first_init(struct r8152 *tp) { u32 ocp_data; @@ -4721,9 +5827,7 @@ static void r8153_first_init(struct r8152 *tp) rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); - ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); + rtl8153_change_mtu(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; @@ -4758,8 +5862,7 @@ static void r8153_enter_oob(struct r8152 *tp) wait_oob_link_list_ready(tp); - ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); switch (tp->version) { case RTL_VER_03: @@ -4773,6 +5876,7 @@ static void r8153_enter_oob(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. 
@@ -4809,6 +5913,96 @@ static void rtl8153_disable(struct r8152 *tp) r8153_aldps_en(tp, true); } +static int rtl8156_enable(struct r8152 *tp) +{ + u32 ocp_data; + u16 speed; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + + set_tx_qlen(tp); + rtl_set_eee_plus(tp); + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); + + speed = rtl8152_get_speed(tp); + rtl_set_ifg(tp, speed); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + if (speed & _2500bps) + ocp_data &= ~IDLE_SPDWN_EN; + else + ocp_data |= IDLE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + if (speed & _1000bps) + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11); + else if (speed & _500bps) + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d); + + if (tp->udev->speed == USB_SPEED_HIGH) { + /* USB 0xb45e[3:0] l1_nyet_hird */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); + ocp_data &= ~0xf; + if (is_flow_control(speed)) + ocp_data |= 0xf; + else + ocp_data |= 0x1; + ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); + } + + return rtl_enable(tp); +} + +static int rtl8156b_enable(struct r8152 *tp) +{ + u32 ocp_data; + u16 speed; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + + set_tx_qlen(tp); + rtl_set_eee_plus(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM); + ocp_data &= ~RX_AGGR_NUM_MASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data); + + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); + + speed = rtl8152_get_speed(tp); + rtl_set_ifg(tp, speed); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + if (speed & _2500bps) + ocp_data &= ~IDLE_SPDWN_EN; + else + ocp_data |= IDLE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + if (tp->udev->speed == USB_SPEED_HIGH) { + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); + ocp_data &= ~0xf; + if (is_flow_control(speed)) + ocp_data |= 0xf; + else + ocp_data |= 0x1; + ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data &= ~FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + usleep_range(1000, 2000); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + + return rtl_enable(tp); +} + static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising) { @@ -4857,58 +6051,73 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, tp->mii.force_media = 1; } else { - u16 anar, tmp1; + u16 orig, new1; u32 support; support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; - if (tp->mii.supports_gmii) + if (tp->mii.supports_gmii) { support |= RTL_ADVERTISED_1000_FULL; + if (tp->support_2500full) + support |= RTL_ADVERTISED_2500_FULL; + } + if (!(advertising & support)) return -EINVAL; - anar = r8152_mdio_read(tp, MII_ADVERTISE); - tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + orig = r8152_mdio_read(tp, MII_ADVERTISE); + new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); if (advertising & RTL_ADVERTISED_10_HALF) { - tmp1 |= ADVERTISE_10HALF; + new1 |= ADVERTISE_10HALF; tp->ups_info.speed_duplex = NWAY_10M_HALF; } if (advertising & RTL_ADVERTISED_10_FULL) { - tmp1 |= ADVERTISE_10FULL; + new1 |= ADVERTISE_10FULL; tp->ups_info.speed_duplex = NWAY_10M_FULL; } if (advertising 
& RTL_ADVERTISED_100_HALF) { - tmp1 |= ADVERTISE_100HALF; + new1 |= ADVERTISE_100HALF; tp->ups_info.speed_duplex = NWAY_100M_HALF; } if (advertising & RTL_ADVERTISED_100_FULL) { - tmp1 |= ADVERTISE_100FULL; + new1 |= ADVERTISE_100FULL; tp->ups_info.speed_duplex = NWAY_100M_FULL; } - if (anar != tmp1) { - r8152_mdio_write(tp, MII_ADVERTISE, tmp1); - tp->mii.advertising = tmp1; + if (orig != new1) { + r8152_mdio_write(tp, MII_ADVERTISE, new1); + tp->mii.advertising = new1; } if (tp->mii.supports_gmii) { - u16 gbcr; - - gbcr = r8152_mdio_read(tp, MII_CTRL1000); - tmp1 = gbcr & ~(ADVERTISE_1000FULL | + orig = r8152_mdio_read(tp, MII_CTRL1000); + new1 = orig & ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertising & RTL_ADVERTISED_1000_FULL) { - tmp1 |= ADVERTISE_1000FULL; + new1 |= ADVERTISE_1000FULL; tp->ups_info.speed_duplex = NWAY_1000M_FULL; } - if (gbcr != tmp1) - r8152_mdio_write(tp, MII_CTRL1000, tmp1); + if (orig != new1) + r8152_mdio_write(tp, MII_CTRL1000, new1); + } + + if (tp->support_2500full) { + orig = ocp_reg_read(tp, OCP_10GBT_CTRL); + new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G; + + if (advertising & RTL_ADVERTISED_2500_FULL) { + new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G; + tp->ups_info.speed_duplex = NWAY_2500M_FULL; + } + + if (orig != new1) + ocp_reg_write(tp, OCP_10GBT_CTRL, new1); } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; @@ -5064,6 +6273,253 @@ static void rtl8153b_down(struct r8152 *tp) r8153_aldps_en(tp, true); } +static void rtl8153c_change_mtu(struct r8152 *tp) +{ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); + + /* Adjust the tx fifo free credit full threshold, otherwise + * the fifo would be too small to send a jumbo frame packet. 
+ */ + if (tp->netdev->mtu < 8000) + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8); + else + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8); +} + +static void rtl8153c_up(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153_aldps_en(tp, false); + + rxdy_gated_en(tp, true); + r8153_teredo_off(tp); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + rtl8152_nic_reset(tp); + rtl_reset_bmu(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data &= ~MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + wait_oob_link_list_ready(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= RE_INIT_LL; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + wait_oob_link_list_ready(tp); + + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); + + rtl8153c_change_mtu(tp); + + rtl8152_nic_reset(tp); + + /* rx share fifo credit full threshold */ + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); + + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); + ocp_data |= BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + r8153_aldps_en(tp, true); + r8153b_u1u2en(tp, true); +} + +static inline u32 fc_pause_on_auto(struct r8152 *tp) +{ + return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); +} + +static inline u32 fc_pause_off_auto(struct r8152 *tp) +{ + return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); +} + +static void r8156_fc_parameter(struct r8152 *tp) +{ + u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp); + u32 pause_off = tp->fc_pause_off ? 
tp->fc_pause_off : fc_pause_off_auto(tp); + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8); + break; + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); + break; + default: + break; + } +} + +static void rtl8156_change_mtu(struct r8152 *tp) +{ + u32 rx_max_size = mtu_to_size(tp->netdev->mtu); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); + r8156_fc_parameter(tp); + + /* TX share fifo free credit full threshold */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, + ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16); +} + +static void rtl8156_up(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153_aldps_en(tp, false); + + rxdy_gated_en(tp, true); + r8153_teredo_off(tp); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + rtl8152_nic_reset(tp); + rtl_reset_bmu(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data &= ~MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); + + rtl8156_change_mtu(tp); + + switch (tp->version) { + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG); + ocp_data |= ACT_ODMA; + ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); + break; + default: + break; + } + + /* share FIFO settings */ + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL); + ocp_data &= ~RXFIFO_FULL_MASK; + ocp_data |= 0x08; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION); + ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF); + ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data); + + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); + + if (tp->saved_wolopts != __rtl_get_wol(tp)) { + netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); + __rtl_set_wol(tp, tp->saved_wolopts); + } + + r8153_aldps_en(tp, true); + r8153_u2p3en(tp, true); + + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); +} + +static void rtl8156_down(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + rtl_drop_queued_tx(tp); + return; + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153b_power_cut_en(tp, false); + r8153_aldps_en(tp, false); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, 
PLA_OOB_CTRL, ocp_data); + + rtl_disable(tp); + rtl_reset_bmu(tp); + + /* Clear teredo wake event. bit[15:8] is the teredo wakeup + * type. Set it to zero. bits[7:0] are the W1C bits about + * the events. Set them to all 1 to clear them. + */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data |= NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + rtl_rx_vlan_en(tp, true); + rxdy_gated_en(tp, false); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data |= RCR_APM | RCR_AM | RCR_AB; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + r8153_aldps_en(tp, true); +} + static bool rtl8152_in_nway(struct r8152 *tp) { u16 nway_state; @@ -5094,7 +6550,7 @@ static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct napi_struct *napi = &tp->napi; - u8 speed; + u16 speed; speed = rtl8152_get_speed(tp); @@ -5107,7 +6563,7 @@ static void set_carrier(struct r8152 *tp) rtl_start_rx(tp); clear_bit(RTL8152_SET_RX_MODE, &tp->flags); _rtl8152_set_rx_mode(netdev); - napi_enable(&tp->napi); + napi_enable(napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); } else if (netif_queue_stopped(netdev) && @@ -5468,14 +6924,9 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); - /* MAC clock speed down */ - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); - r8153_power_cut_en(tp, false); rtl_runtime_suspend_enable(tp, false); + r8153_mac_clk_speed_down(tp, false); r8153_u1u2en(tp, true); usb_enable_lpm(tp->udev); @@ -5490,7 +6941,7 @@ static void r8153_init(struct r8152 *tp) /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); - if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags)) + if (tp->dell_tb_rx_agg_bug) ocp_data |= RX_AGG_DISABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); @@ -5566,9 +7017,7 @@ static void r8153b_init(struct r8152 *tp) usb_enable_lpm(tp->udev); /* MAC clock speed down */ - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); - ocp_data |= MAC_CLK_SPDWN_EN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + r8153_mac_clk_speed_down(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; @@ -5595,6 +7044,1102 @@ static void r8153b_init(struct r8152 *tp) tp->coalesce = 15000; /* 15 us */ } +static void r8153c_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + + /* Disable spi_en */ + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); + ocp_data &= ~BIT(3); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0); + ocp_data |= BIT(1); + ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data); + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + } + + data = r8153_phy_status(tp, 0); + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + 
r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + r8153b_power_cut_en(tp, false); + r8153c_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + r8153b_u1u2en(tp, true); + + usb_enable_lpm(tp->udev); + + /* MAC clock speed down */ + r8153_mac_clk_speed_down(tp, true); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~BIT(7); + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static void r8156_hw_phy_cfg(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if (ocp_data & PCUT_STATUS) { + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); + } + + data = r8153_phy_status(tp, 0); + switch (data) { + case PHY_STAT_EXT_INIT: + rtl8152_apply_firmware(tp, true); + + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + break; + case PHY_STAT_LAN_ON: + case PHY_STAT_PWRDN: + default: + rtl8152_apply_firmware(tp, false); + break; + } + + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); + + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= PFM_PWM_SWITCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + + switch (tp->version) { + case RTL_VER_10: + data = ocp_reg_read(tp, 0xad40); + data &= ~0x3ff; + data |= BIT(7) | BIT(2); + ocp_reg_write(tp, 0xad40, data); + + data = ocp_reg_read(tp, 0xad4e); + data |= BIT(4); + ocp_reg_write(tp, 0xad4e, data); + data = ocp_reg_read(tp, 0xad16); + data &= ~0x3ff; + data |= 0x6; + ocp_reg_write(tp, 0xad16, data); + data = ocp_reg_read(tp, 0xad32); + data &= ~0x3f; + data |= 6; + ocp_reg_write(tp, 0xad32, data); + data = ocp_reg_read(tp, 0xac08); + data &= ~(BIT(12) | BIT(8)); + ocp_reg_write(tp, 0xac08, data); + data = ocp_reg_read(tp, 0xac8a); + data |= BIT(12) | BIT(13) | BIT(14); + data &= ~BIT(15); + ocp_reg_write(tp, 0xac8a, data); + data = ocp_reg_read(tp, 0xad18); + data |= BIT(10); + ocp_reg_write(tp, 0xad18, data); + data = ocp_reg_read(tp, 0xad1a); + data |= 0x3ff; + ocp_reg_write(tp, 0xad1a, data); + data = ocp_reg_read(tp, 0xad1c); + data |= 0x3ff; + ocp_reg_write(tp, 0xad1c, data); + + data = sram_read(tp, 0x80ea); + data &= ~0xff00; + data |= 0xc400; + sram_write(tp, 0x80ea, data); + data = sram_read(tp, 0x80eb); + data &= ~0x0700; + data |= 0x0300; + sram_write(tp, 0x80eb, data); + data = sram_read(tp, 0x80f8); + data &= ~0xff00; + data |= 0x1c00; + sram_write(tp, 0x80f8, data); + data = sram_read(tp, 0x80f1); + data &= ~0xff00; + data |= 0x3000; + sram_write(tp, 0x80f1, 
data); + + data = sram_read(tp, 0x80fe); + data &= ~0xff00; + data |= 0xa500; + sram_write(tp, 0x80fe, data); + data = sram_read(tp, 0x8102); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x8102, data); + data = sram_read(tp, 0x8015); + data &= ~0xff00; + data |= 0x3300; + sram_write(tp, 0x8015, data); + data = sram_read(tp, 0x8100); + data &= ~0xff00; + data |= 0x7000; + sram_write(tp, 0x8100, data); + data = sram_read(tp, 0x8014); + data &= ~0xff00; + data |= 0xf000; + sram_write(tp, 0x8014, data); + data = sram_read(tp, 0x8016); + data &= ~0xff00; + data |= 0x6500; + sram_write(tp, 0x8016, data); + data = sram_read(tp, 0x80dc); + data &= ~0xff00; + data |= 0xed00; + sram_write(tp, 0x80dc, data); + data = sram_read(tp, 0x80df); + data |= BIT(8); + sram_write(tp, 0x80df, data); + data = sram_read(tp, 0x80e1); + data &= ~BIT(8); + sram_write(tp, 0x80e1, data); + + data = ocp_reg_read(tp, 0xbf06); + data &= ~0x003f; + data |= 0x0038; + ocp_reg_write(tp, 0xbf06, data); + + sram_write(tp, 0x819f, 0xddb6); + + ocp_reg_write(tp, 0xbc34, 0x5555); + data = ocp_reg_read(tp, 0xbf0a); + data &= ~0x0e00; + data |= 0x0a00; + ocp_reg_write(tp, 0xbf0a, data); + + data = ocp_reg_read(tp, 0xbd2c); + data &= ~BIT(13); + ocp_reg_write(tp, 0xbd2c, data); + break; + case RTL_VER_11: + data = ocp_reg_read(tp, 0xad16); + data |= 0x3ff; + ocp_reg_write(tp, 0xad16, data); + data = ocp_reg_read(tp, 0xad32); + data &= ~0x3f; + data |= 6; + ocp_reg_write(tp, 0xad32, data); + data = ocp_reg_read(tp, 0xac08); + data &= ~(BIT(12) | BIT(8)); + ocp_reg_write(tp, 0xac08, data); + data = ocp_reg_read(tp, 0xacc0); + data &= ~0x3; + data |= BIT(1); + ocp_reg_write(tp, 0xacc0, data); + data = ocp_reg_read(tp, 0xad40); + data &= ~0xe7; + data |= BIT(6) | BIT(2); + ocp_reg_write(tp, 0xad40, data); + data = ocp_reg_read(tp, 0xac14); + data &= ~BIT(7); + ocp_reg_write(tp, 0xac14, data); + data = ocp_reg_read(tp, 0xac80); + data &= ~(BIT(8) | BIT(9)); + ocp_reg_write(tp, 0xac80, data); + data = ocp_reg_read(tp, 0xac5e); + data &= ~0x7; + data |= BIT(1); + ocp_reg_write(tp, 0xac5e, data); + ocp_reg_write(tp, 0xad4c, 0x00a8); + ocp_reg_write(tp, 0xac5c, 0x01ff); + data = ocp_reg_read(tp, 0xac8a); + data &= ~0xf0; + data |= BIT(4) | BIT(5); + ocp_reg_write(tp, 0xac8a, data); + ocp_reg_write(tp, 0xb87c, 0x8157); + data = ocp_reg_read(tp, 0xb87e); + data &= ~0xff00; + data |= 0x0500; + ocp_reg_write(tp, 0xb87e, data); + ocp_reg_write(tp, 0xb87c, 0x8159); + data = ocp_reg_read(tp, 0xb87e); + data &= ~0xff00; + data |= 0x0700; + ocp_reg_write(tp, 0xb87e, data); + + /* AAGC */ + ocp_reg_write(tp, 0xb87c, 0x80a2); + ocp_reg_write(tp, 0xb87e, 0x0153); + ocp_reg_write(tp, 0xb87c, 0x809c); + ocp_reg_write(tp, 0xb87e, 0x0153); + + /* EEE parameter */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG); + ocp_data |= EN_XG_LIP | EN_G_LIP; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); + + sram_write(tp, 0x8257, 0x020f); /* XG PLL */ + sram_write(tp, 0x80ea, 0x7843); /* GIGA Master */ + + if (rtl_phy_patch_request(tp, true, true)) + return; + + /* Advance EEE */ + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= EEE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + data = ocp_reg_read(tp, OCP_DOWN_SPEED); + data &= ~(EN_EEE_100 | EN_EEE_1000); + data |= EN_10M_CLKDIV; + ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_100 = false; + 
tp->ups_info.eee_plloff_giga = false; + + data = ocp_reg_read(tp, OCP_POWER_CFG); + data &= ~EEE_CLKDIV_EN; + ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = false; + + ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); + ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5)); + tp->ups_info._250m_ckdiv = false; + + rtl_phy_patch_request(tp, false, true); + + /* enable ADC Ibias Cal */ + data = ocp_reg_read(tp, 0xd068); + data |= BIT(13); + ocp_reg_write(tp, 0xd068, data); + + /* enable Thermal Sensor */ + data = sram_read(tp, 0x81a2); + data &= ~BIT(8); + sram_write(tp, 0x81a2, data); + data = ocp_reg_read(tp, 0xb54c); + data &= ~0xff00; + data |= 0xdb00; + ocp_reg_write(tp, 0xb54c, data); + + /* Nway 2.5G Lite */ + data = ocp_reg_read(tp, 0xa454); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa454, data); + + /* CS DSP solution */ + data = ocp_reg_read(tp, OCP_10GBT_CTRL); + data |= RTL_ADV2_5G_F_R; + ocp_reg_write(tp, OCP_10GBT_CTRL, data); + data = ocp_reg_read(tp, 0xad4e); + data &= ~BIT(4); + ocp_reg_write(tp, 0xad4e, data); + data = ocp_reg_read(tp, 0xa86a); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa86a, data); + + /* MDI SWAP */ + if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) & MID_REVERSE) && + (ocp_reg_read(tp, 0xd068) & BIT(1))) { + u16 swap_a, swap_b; + + data = ocp_reg_read(tp, 0xd068); + data &= ~0x1f; + data |= 0x1; /* p0 */ + ocp_reg_write(tp, 0xd068, data); + swap_a = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; + data |= 0x18; /* p3 */ + ocp_reg_write(tp, 0xd068, data); + swap_b = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; /* p0 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_a & ~0x7ff) | (swap_b & 0x7ff)); + data |= 0x18; /* p3 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_b & ~0x7ff) | (swap_a & 0x7ff)); + data &= ~0x18; + data |= 0x08; /* p1 */ + ocp_reg_write(tp, 0xd068, data); + swap_a = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; + data |= 0x10; /* p2 */ + ocp_reg_write(tp, 0xd068, data); + swap_b = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; + data |= 0x08; /* p1 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_a & ~0x7ff) | (swap_b & 0x7ff)); + data &= ~0x18; + data |= 0x10; /* p2 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_b & ~0x7ff) | (swap_a & 0x7ff)); + swap_a = ocp_reg_read(tp, 0xbd5a); + swap_b = ocp_reg_read(tp, 0xbd5c); + ocp_reg_write(tp, 0xbd5a, (swap_a & ~0x1f1f) | + ((swap_b & 0x1f) << 8) | + ((swap_b >> 8) & 0x1f)); + ocp_reg_write(tp, 0xbd5c, (swap_b & ~0x1f1f) | + ((swap_a & 0x1f) << 8) | + ((swap_a >> 8) & 0x1f)); + swap_a = ocp_reg_read(tp, 0xbc18); + swap_b = ocp_reg_read(tp, 0xbc1a); + ocp_reg_write(tp, 0xbc18, (swap_a & ~0x1f1f) | + ((swap_b & 0x1f) << 8) | + ((swap_b >> 8) & 0x1f)); + ocp_reg_write(tp, 0xbc1a, (swap_b & ~0x1f1f) | + ((swap_a & 0x1f) << 8) | + ((swap_a >> 8) & 0x1f)); + } + break; + default: + break; + } + + rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); + + data = ocp_reg_read(tp, 0xa428); + data &= ~BIT(9); + ocp_reg_write(tp, 0xa428, data); + data = ocp_reg_read(tp, 0xa5ea); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa5ea, data); + tp->ups_info.lite_mode = 0; + + if (tp->eee_en) + rtl_eee_enable(tp, true); + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + r8153_u2p3en(tp, true); + + set_bit(PHY_RESET, &tp->flags); +} + +static void r8156b_hw_phy_cfg(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + + switch (tp->version) { + case RTL_VER_12: + ocp_reg_write(tp, 0xbf86, 0x9000); + data = 
ocp_reg_read(tp, 0xc402); + data |= BIT(10); + ocp_reg_write(tp, 0xc402, data); + data &= ~BIT(10); + ocp_reg_write(tp, 0xc402, data); + ocp_reg_write(tp, 0xbd86, 0x1010); + ocp_reg_write(tp, 0xbd88, 0x1010); + data = ocp_reg_read(tp, 0xbd4e); + data &= ~(BIT(10) | BIT(11)); + data |= BIT(11); + ocp_reg_write(tp, 0xbd4e, data); + data = ocp_reg_read(tp, 0xbf46); + data &= ~0xf00; + data |= 0x700; + ocp_reg_write(tp, 0xbf46, data); + break; + case RTL_VER_13: + case RTL_VER_15: + r8156b_wait_loading_flash(tp); + break; + default: + break; + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if (ocp_data & PCUT_STATUS) { + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); + } + + data = r8153_phy_status(tp, 0); + switch (data) { + case PHY_STAT_EXT_INIT: + rtl8152_apply_firmware(tp, true); + + data = ocp_reg_read(tp, 0xa466); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa466, data); + + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + break; + case PHY_STAT_LAN_ON: + case PHY_STAT_PWRDN: + default: + rtl8152_apply_firmware(tp, false); + break; + } + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); + + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= PFM_PWM_SWITCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + + switch (tp->version) { + case RTL_VER_12: + data = ocp_reg_read(tp, 0xbc08); + data |= BIT(3) | BIT(2); + ocp_reg_write(tp, 0xbc08, data); + + data = sram_read(tp, 0x8fff); + data &= ~0xff00; + data |= 0x0400; + sram_write(tp, 0x8fff, data); + + data = ocp_reg_read(tp, 0xacda); + data |= 0xff00; + ocp_reg_write(tp, 0xacda, data); + data = ocp_reg_read(tp, 0xacde); + data |= 0xf000; + ocp_reg_write(tp, 0xacde, data); + ocp_reg_write(tp, 0xac8c, 0x0ffc); + ocp_reg_write(tp, 0xac46, 0xb7b4); + ocp_reg_write(tp, 0xac50, 0x0fbc); + ocp_reg_write(tp, 0xac3c, 0x9240); + ocp_reg_write(tp, 0xac4e, 0x0db4); + ocp_reg_write(tp, 0xacc6, 0x0707); + ocp_reg_write(tp, 0xacc8, 0xa0d3); + ocp_reg_write(tp, 0xad08, 0x0007); + + ocp_reg_write(tp, 0xb87c, 0x8560); + ocp_reg_write(tp, 0xb87e, 0x19cc); + ocp_reg_write(tp, 0xb87c, 0x8562); + ocp_reg_write(tp, 0xb87e, 0x19cc); + ocp_reg_write(tp, 0xb87c, 0x8564); + ocp_reg_write(tp, 0xb87e, 0x19cc); + ocp_reg_write(tp, 0xb87c, 0x8566); + ocp_reg_write(tp, 0xb87e, 0x147d); + ocp_reg_write(tp, 0xb87c, 0x8568); + ocp_reg_write(tp, 0xb87e, 0x147d); + ocp_reg_write(tp, 0xb87c, 0x856a); + ocp_reg_write(tp, 0xb87e, 0x147d); + ocp_reg_write(tp, 0xb87c, 0x8ffe); + ocp_reg_write(tp, 0xb87e, 0x0907); + ocp_reg_write(tp, 0xb87c, 0x80d6); + ocp_reg_write(tp, 0xb87e, 0x2801); + ocp_reg_write(tp, 0xb87c, 0x80f2); + ocp_reg_write(tp, 0xb87e, 0x2801); + ocp_reg_write(tp, 0xb87c, 0x80f4); + ocp_reg_write(tp, 0xb87e, 0x6077); + ocp_reg_write(tp, 0xb506, 0x01e7); + + ocp_reg_write(tp, 0xb87c, 0x8013); + ocp_reg_write(tp, 0xb87e, 0x0700); + ocp_reg_write(tp, 0xb87c, 0x8fb9); + ocp_reg_write(tp, 0xb87e, 0x2801); + ocp_reg_write(tp, 0xb87c, 0x8fba); + ocp_reg_write(tp, 0xb87e, 0x0100); + ocp_reg_write(tp, 0xb87c, 0x8fbc); + ocp_reg_write(tp, 0xb87e, 0x1900); + ocp_reg_write(tp, 0xb87c, 0x8fbe); + 
ocp_reg_write(tp, 0xb87e, 0xe100); + ocp_reg_write(tp, 0xb87c, 0x8fc0); + ocp_reg_write(tp, 0xb87e, 0x0800); + ocp_reg_write(tp, 0xb87c, 0x8fc2); + ocp_reg_write(tp, 0xb87e, 0xe500); + ocp_reg_write(tp, 0xb87c, 0x8fc4); + ocp_reg_write(tp, 0xb87e, 0x0f00); + ocp_reg_write(tp, 0xb87c, 0x8fc6); + ocp_reg_write(tp, 0xb87e, 0xf100); + ocp_reg_write(tp, 0xb87c, 0x8fc8); + ocp_reg_write(tp, 0xb87e, 0x0400); + ocp_reg_write(tp, 0xb87c, 0x8fca); + ocp_reg_write(tp, 0xb87e, 0xf300); + ocp_reg_write(tp, 0xb87c, 0x8fcc); + ocp_reg_write(tp, 0xb87e, 0xfd00); + ocp_reg_write(tp, 0xb87c, 0x8fce); + ocp_reg_write(tp, 0xb87e, 0xff00); + ocp_reg_write(tp, 0xb87c, 0x8fd0); + ocp_reg_write(tp, 0xb87e, 0xfb00); + ocp_reg_write(tp, 0xb87c, 0x8fd2); + ocp_reg_write(tp, 0xb87e, 0x0100); + ocp_reg_write(tp, 0xb87c, 0x8fd4); + ocp_reg_write(tp, 0xb87e, 0xf400); + ocp_reg_write(tp, 0xb87c, 0x8fd6); + ocp_reg_write(tp, 0xb87e, 0xff00); + ocp_reg_write(tp, 0xb87c, 0x8fd8); + ocp_reg_write(tp, 0xb87e, 0xf600); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG); + ocp_data |= EN_XG_LIP | EN_G_LIP; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); + ocp_reg_write(tp, 0xb87c, 0x813d); + ocp_reg_write(tp, 0xb87e, 0x390e); + ocp_reg_write(tp, 0xb87c, 0x814f); + ocp_reg_write(tp, 0xb87e, 0x790e); + ocp_reg_write(tp, 0xb87c, 0x80b0); + ocp_reg_write(tp, 0xb87e, 0x0f31); + data = ocp_reg_read(tp, 0xbf4c); + data |= BIT(1); + ocp_reg_write(tp, 0xbf4c, data); + data = ocp_reg_read(tp, 0xbcca); + data |= BIT(9) | BIT(8); + ocp_reg_write(tp, 0xbcca, data); + ocp_reg_write(tp, 0xb87c, 0x8141); + ocp_reg_write(tp, 0xb87e, 0x320e); + ocp_reg_write(tp, 0xb87c, 0x8153); + ocp_reg_write(tp, 0xb87e, 0x720e); + ocp_reg_write(tp, 0xb87c, 0x8529); + ocp_reg_write(tp, 0xb87e, 0x050e); + data = ocp_reg_read(tp, OCP_EEE_CFG); + data &= ~CTAP_SHORT_EN; + ocp_reg_write(tp, OCP_EEE_CFG, data); + + sram_write(tp, 0x816c, 0xc4a0); + sram_write(tp, 0x8170, 0xc4a0); + sram_write(tp, 0x8174, 0x04a0); + sram_write(tp, 0x8178, 0x04a0); + sram_write(tp, 0x817c, 0x0719); + sram_write(tp, 0x8ff4, 0x0400); + sram_write(tp, 0x8ff1, 0x0404); + + ocp_reg_write(tp, 0xbf4a, 0x001b); + ocp_reg_write(tp, 0xb87c, 0x8033); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x8037); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x803b); + ocp_reg_write(tp, 0xb87e, 0xfc32); + ocp_reg_write(tp, 0xb87c, 0x803f); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x8043); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x8047); + ocp_reg_write(tp, 0xb87e, 0x7c13); + + ocp_reg_write(tp, 0xb87c, 0x8145); + ocp_reg_write(tp, 0xb87e, 0x370e); + ocp_reg_write(tp, 0xb87c, 0x8157); + ocp_reg_write(tp, 0xb87e, 0x770e); + ocp_reg_write(tp, 0xb87c, 0x8169); + ocp_reg_write(tp, 0xb87e, 0x0d0a); + ocp_reg_write(tp, 0xb87c, 0x817b); + ocp_reg_write(tp, 0xb87e, 0x1d0a); + + data = sram_read(tp, 0x8217); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x8217, data); + data = sram_read(tp, 0x821a); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x821a, data); + sram_write(tp, 0x80da, 0x0403); + data = sram_read(tp, 0x80dc); + data &= ~0xff00; + data |= 0x1000; + sram_write(tp, 0x80dc, data); + sram_write(tp, 0x80b3, 0x0384); + sram_write(tp, 0x80b7, 0x2007); + data = sram_read(tp, 0x80ba); + data &= ~0xff00; + data |= 0x6c00; + sram_write(tp, 0x80ba, data); + sram_write(tp, 0x80b5, 0xf009); + data = sram_read(tp, 0x80bd); + data &= ~0xff00; + data |= 0x9f00; + sram_write(tp, 0x80bd, data); + 
sram_write(tp, 0x80c7, 0xf083); + sram_write(tp, 0x80dd, 0x03f0); + data = sram_read(tp, 0x80df); + data &= ~0xff00; + data |= 0x1000; + sram_write(tp, 0x80df, data); + sram_write(tp, 0x80cb, 0x2007); + data = sram_read(tp, 0x80ce); + data &= ~0xff00; + data |= 0x6c00; + sram_write(tp, 0x80ce, data); + sram_write(tp, 0x80c9, 0x8009); + data = sram_read(tp, 0x80d1); + data &= ~0xff00; + data |= 0x8000; + sram_write(tp, 0x80d1, data); + sram_write(tp, 0x80a3, 0x200a); + sram_write(tp, 0x80a5, 0xf0ad); + sram_write(tp, 0x809f, 0x6073); + sram_write(tp, 0x80a1, 0x000b); + data = sram_read(tp, 0x80a9); + data &= ~0xff00; + data |= 0xc000; + sram_write(tp, 0x80a9, data); + + if (rtl_phy_patch_request(tp, true, true)) + return; + + data = ocp_reg_read(tp, 0xb896); + data &= ~BIT(0); + ocp_reg_write(tp, 0xb896, data); + data = ocp_reg_read(tp, 0xb892); + data &= ~0xff00; + ocp_reg_write(tp, 0xb892, data); + ocp_reg_write(tp, 0xb88e, 0xc23e); + ocp_reg_write(tp, 0xb890, 0x0000); + ocp_reg_write(tp, 0xb88e, 0xc240); + ocp_reg_write(tp, 0xb890, 0x0103); + ocp_reg_write(tp, 0xb88e, 0xc242); + ocp_reg_write(tp, 0xb890, 0x0507); + ocp_reg_write(tp, 0xb88e, 0xc244); + ocp_reg_write(tp, 0xb890, 0x090b); + ocp_reg_write(tp, 0xb88e, 0xc246); + ocp_reg_write(tp, 0xb890, 0x0c0e); + ocp_reg_write(tp, 0xb88e, 0xc248); + ocp_reg_write(tp, 0xb890, 0x1012); + ocp_reg_write(tp, 0xb88e, 0xc24a); + ocp_reg_write(tp, 0xb890, 0x1416); + data = ocp_reg_read(tp, 0xb896); + data |= BIT(0); + ocp_reg_write(tp, 0xb896, data); + + rtl_phy_patch_request(tp, false, true); + + data = ocp_reg_read(tp, 0xa86a); + data |= BIT(0); + ocp_reg_write(tp, 0xa86a, data); + data = ocp_reg_read(tp, 0xa6f0); + data |= BIT(0); + ocp_reg_write(tp, 0xa6f0, data); + + ocp_reg_write(tp, 0xbfa0, 0xd70d); + ocp_reg_write(tp, 0xbfa2, 0x4100); + ocp_reg_write(tp, 0xbfa4, 0xe868); + ocp_reg_write(tp, 0xbfa6, 0xdc59); + ocp_reg_write(tp, 0xb54c, 0x3c18); + data = ocp_reg_read(tp, 0xbfa4); + data &= ~BIT(5); + ocp_reg_write(tp, 0xbfa4, data); + data = sram_read(tp, 0x817d); + data |= BIT(12); + sram_write(tp, 0x817d, data); + break; + case RTL_VER_13: + /* 2.5G INRX */ + data = ocp_reg_read(tp, 0xac46); + data &= ~0x00f0; + data |= 0x0090; + ocp_reg_write(tp, 0xac46, data); + data = ocp_reg_read(tp, 0xad30); + data &= ~0x0003; + data |= 0x0001; + ocp_reg_write(tp, 0xad30, data); + fallthrough; + case RTL_VER_15: + /* EEE parameter */ + ocp_reg_write(tp, 0xb87c, 0x80f5); + ocp_reg_write(tp, 0xb87e, 0x760e); + ocp_reg_write(tp, 0xb87c, 0x8107); + ocp_reg_write(tp, 0xb87e, 0x360e); + ocp_reg_write(tp, 0xb87c, 0x8551); + data = ocp_reg_read(tp, 0xb87e); + data &= ~0xff00; + data |= 0x0800; + ocp_reg_write(tp, 0xb87e, data); + + /* ADC_PGA parameter */ + data = ocp_reg_read(tp, 0xbf00); + data &= ~0xe000; + data |= 0xa000; + ocp_reg_write(tp, 0xbf00, data); + data = ocp_reg_read(tp, 0xbf46); + data &= ~0x0f00; + data |= 0x0300; + ocp_reg_write(tp, 0xbf46, data); + + /* Green Table-PGA, 1G full viterbi */ + sram_write(tp, 0x8044, 0x2417); + sram_write(tp, 0x804a, 0x2417); + sram_write(tp, 0x8050, 0x2417); + sram_write(tp, 0x8056, 0x2417); + sram_write(tp, 0x805c, 0x2417); + sram_write(tp, 0x8062, 0x2417); + sram_write(tp, 0x8068, 0x2417); + sram_write(tp, 0x806e, 0x2417); + sram_write(tp, 0x8074, 0x2417); + sram_write(tp, 0x807a, 0x2417); + + /* XG PLL */ + data = ocp_reg_read(tp, 0xbf84); + data &= ~0xe000; + data |= 0xa000; + ocp_reg_write(tp, 0xbf84, data); + break; + default: + break; + } + + if (rtl_phy_patch_request(tp, true, true)) + return; + + 
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= EEE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + data = ocp_reg_read(tp, OCP_DOWN_SPEED); + data &= ~(EN_EEE_100 | EN_EEE_1000); + data |= EN_10M_CLKDIV; + ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_100 = false; + tp->ups_info.eee_plloff_giga = false; + + data = ocp_reg_read(tp, OCP_POWER_CFG); + data &= ~EEE_CLKDIV_EN; + ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = false; + + rtl_phy_patch_request(tp, false, true); + + rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); + + data = ocp_reg_read(tp, 0xa428); + data &= ~BIT(9); + ocp_reg_write(tp, 0xa428, data); + data = ocp_reg_read(tp, 0xa5ea); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa5ea, data); + tp->ups_info.lite_mode = 0; + + if (tp->eee_en) + rtl_eee_enable(tp, true); + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + r8153_u2p3en(tp, true); + + set_bit(PHY_RESET, &tp->flags); +} + +static void r8156_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); + ocp_data &= ~EN_ALL_SPEED; + ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); + + ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); + ocp_data |= BYPASS_MAC_RESET; + ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); + + r8153b_u1u2en(tp, false); + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + } + + data = r8153_phy_status(tp, 0); + if (data == PHY_STAT_EXT_INIT) { + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + } + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + /* U1/U2/L1 idle timer. 
500 us */ + ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); + + r8153b_power_cut_en(tp, false); + r8156_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + + usb_enable_lpm(tp->udev); + + r8156_mac_clk_spd(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG); + ocp_data |= ACT_ODMA; + ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static void r8156b_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); + ocp_data &= ~EN_ALL_SPEED; + ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); + + ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); + ocp_data |= BYPASS_MAC_RESET; + ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); + ocp_data |= RX_DETECT8; + ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); + + r8153b_u1u2en(tp, false); + + switch (tp->version) { + case RTL_VER_13: + case RTL_VER_15: + r8156b_wait_loading_flash(tp); + break; + default: + break; + } + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + } + + data = r8153_phy_status(tp, 0); + if (data == PHY_STAT_EXT_INIT) { + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + + data = ocp_reg_read(tp, 0xa466); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa466, data); + } + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + /* U1/U2/L1 idle timer. 500 us */ + ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); + + r8153b_power_cut_en(tp, false); + r8156_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + + usb_enable_lpm(tp->udev); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~SLOT_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + ocp_data |= FLOW_CTRL_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + + /* enable fc timer and set timer to 600 ms. 
*/ + ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, + CTRL_TIMER_EN | (600 / 8)); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); + if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & DACK_DET_EN)) + ocp_data |= FLOW_CTRL_PATCH_2; + ocp_data &= ~AUTO_SPEEDUP; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + + r8156_mac_clk_spd(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static bool rtl_vendor_mode(struct usb_interface *intf) +{ + struct usb_host_interface *alt = intf->cur_altsetting; + struct usb_device *udev; + struct usb_host_config *c; + int i, num_configs; + + if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) + return true; + + /* The vendor mode is not always config #1, so to find it out. */ + udev = interface_to_usbdev(intf); + c = udev->config; + num_configs = udev->descriptor.bNumConfigurations; + for (i = 0; i < num_configs; (i++, c++)) { + struct usb_interface_descriptor *desc = NULL; + + if (c->desc.bNumInterfaces > 0) + desc = &c->intf_cache[0]->altsetting->desc; + else + continue; + + if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) { + usb_driver_set_configuration(udev, c->desc.bConfigurationValue); + break; + } + } + + WARN_ON_ONCE(i == num_configs); + + return false; +} + static int rtl8152_pre_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); @@ -5958,6 +8503,22 @@ int rtl8152_get_link_ksettings(struct net_device *netdev, mii_ethtool_get_link_ksettings(&tp->mii, cmd); + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.supported, tp->support_2500full); + + if (tp->support_2500full) { + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.advertising, + ocp_reg_read(tp, OCP_10GBT_CTRL) & MDIO_AN_10GBT_CTRL_ADV2_5G); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.lp_advertising, + ocp_reg_read(tp, OCP_10GBT_STAT) & MDIO_AN_10GBT_STAT_LP2_5G); + + if (is_speed_2500(rtl8152_get_speed(tp))) + cmd->base.speed = SPEED_2500; + } + mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); @@ -6001,6 +8562,10 @@ static int rtl8152_set_link_ksettings(struct net_device *dev, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_1000_FULL; + if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_2500_FULL; + mutex_lock(&tp->control); ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, @@ -6459,12 +9024,21 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; if (netif_running(dev)) { - u32 rms = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + if (tp->rtl_ops.change_mtu) + tp->rtl_ops.change_mtu(tp); - 
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); - - if (netif_carrier_ok(dev)) - r8153_set_rx_early_size(tp); + if (netif_carrier_ok(dev)) { + netif_stop_queue(dev); + napi_disable(&tp->napi); + tasklet_disable(&tp->tx_tl); + tp->rtl_ops.disable(tp); + tp->rtl_ops.enable(tp); + rtl_start_rx(tp); + tasklet_enable(&tp->tx_tl); + napi_enable(&tp->napi); + rtl8152_set_rx_mode(dev); + netif_wake_queue(dev); + } } mutex_unlock(&tp->control); @@ -6553,6 +9127,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; + ops->change_mtu = rtl8153_change_mtu; if (tp->udev->speed < USB_SPEED_SUPER) tp->rx_buf_sz = 16 * 1024; else @@ -6574,6 +9149,68 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; + ops->change_mtu = rtl8153_change_mtu; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + break; + + case RTL_VER_11: + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + fallthrough; + case RTL_VER_10: + ops->init = r8156_init; + ops->enable = rtl8156_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8156_up; + ops->down = rtl8156_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8156_hw_phy_cfg; + ops->autosuspend_en = rtl8156_runtime_enable; + ops->change_mtu = rtl8156_change_mtu; + tp->rx_buf_sz = 48 * 1024; + tp->support_2500full = 1; + break; + + case RTL_VER_12: + case RTL_VER_13: + tp->support_2500full = 1; + fallthrough; + case RTL_VER_15: + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + ops->init = r8156b_init; + ops->enable = rtl8156b_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8156_up; + ops->down = rtl8156_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8156b_hw_phy_cfg; + ops->autosuspend_en = rtl8156_runtime_enable; + ops->change_mtu = rtl8156_change_mtu; + tp->rx_buf_sz = 48 * 1024; + break; + + case RTL_VER_14: + ops->init = r8153c_init; + ops->enable = rtl8153_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8153c_up; + ops->down = rtl8153b_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153c_hw_phy_cfg; + ops->autosuspend_en = rtl8153c_runtime_enable; + ops->change_mtu = rtl8153c_change_mtu; tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; @@ -6592,11 +9229,17 @@ static int rtl_ops_init(struct r8152 *tp) #define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw" #define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw" #define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw" +#define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw" +#define FIRMWARE_8156A_2 "rtl_nic/rtl8156a-2.fw" +#define FIRMWARE_8156B_2 "rtl_nic/rtl8156b-2.fw" MODULE_FIRMWARE(FIRMWARE_8153A_2); MODULE_FIRMWARE(FIRMWARE_8153A_3); MODULE_FIRMWARE(FIRMWARE_8153A_4); MODULE_FIRMWARE(FIRMWARE_8153B_2); +MODULE_FIRMWARE(FIRMWARE_8153C_1); +MODULE_FIRMWARE(FIRMWARE_8156A_2); +MODULE_FIRMWARE(FIRMWARE_8156B_2); static int rtl_fw_init(struct r8152 *tp) { @@ -6622,6 +9265,19 @@ static int rtl_fw_init(struct r8152 *tp) rtl_fw->pre_fw = r8153b_pre_firmware_1; 
rtl_fw->post_fw = r8153b_post_firmware_1; break; + case RTL_VER_11: + rtl_fw->fw_name = FIRMWARE_8156A_2; + rtl_fw->post_fw = r8156a_post_firmware_1; + break; + case RTL_VER_13: + case RTL_VER_15: + rtl_fw->fw_name = FIRMWARE_8156B_2; + break; + case RTL_VER_14: + rtl_fw->fw_name = FIRMWARE_8153C_1; + rtl_fw->pre_fw = r8153b_pre_firmware_1; + rtl_fw->post_fw = r8153c_post_firmware_1; + break; default: break; } @@ -6677,6 +9333,27 @@ u8 rtl8152_get_version(struct usb_interface *intf) case 0x6010: version = RTL_VER_09; break; + case 0x7010: + version = RTL_TEST_01; + break; + case 0x7020: + version = RTL_VER_10; + break; + case 0x7030: + version = RTL_VER_11; + break; + case 0x7400: + version = RTL_VER_12; + break; + case 0x7410: + version = RTL_VER_13; + break; + case 0x6400: + version = RTL_VER_14; + break; + case 0x7420: + version = RTL_VER_15; + break; default: version = RTL_VER_UNKNOWN; dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); @@ -6701,10 +9378,8 @@ static int rtl8152_probe(struct usb_interface *intf, if (version == RTL_VER_UNKNOWN) return -ENODEV; - if (udev->actconfig->desc.bConfigurationValue != 1) { - usb_driver_set_configuration(udev, 1); + if (!rtl_vendor_mode(intf)) return -ENODEV; - } if (intf->cur_altsetting->desc.bNumEndpoints < 3) return -ENODEV; @@ -6772,7 +9447,7 @@ static int rtl8152_probe(struct usb_interface *intf, switch (le16_to_cpu(udev->descriptor.idProduct)) { case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: - set_bit(LENOVO_MACPASSTHRU, &tp->flags); + tp->lenovo_macpassthru = 1; } } @@ -6780,7 +9455,7 @@ static int rtl8152_probe(struct usb_interface *intf, (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); - set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); + tp->dell_tb_rx_agg_bug = 1; } netdev->ethtool_ops = &ops; @@ -6789,12 +9464,29 @@ static int rtl8152_probe(struct usb_interface *intf, /* MTU range: 68 - 1500 or 9194 */ netdev->min_mtu = ETH_MIN_MTU; switch (tp->version) { + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_08: + case RTL_VER_09: + case RTL_VER_14: + netdev->max_mtu = size_to_mtu(9 * 1024); + break; + case RTL_VER_10: + case RTL_VER_11: + netdev->max_mtu = size_to_mtu(15 * 1024); + break; + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + netdev->max_mtu = size_to_mtu(16 * 1024); + break; case RTL_VER_01: case RTL_VER_02: - netdev->max_mtu = ETH_DATA_LEN; - break; + case RTL_VER_07: default: - netdev->max_mtu = RTL8153_MAX_MTU; + netdev->max_mtu = ETH_DATA_LEN; break; } @@ -6810,7 +9502,13 @@ static int rtl8152_probe(struct usb_interface *intf, tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { - tp->speed = SPEED_1000; + if (tp->support_2500full && + tp->udev->speed >= USB_SPEED_SUPER) { + tp->speed = SPEED_2500; + tp->advertising |= RTL_ADVERTISED_2500_FULL; + } else { + tp->speed = SPEED_1000; + } tp->advertising |= RTL_ADVERTISED_1000_FULL; } tp->duplex = DUPLEX_FULL; @@ -6834,7 +9532,11 @@ static int rtl8152_probe(struct usb_interface *intf, set_ethernet_addr(tp); usb_set_intfdata(intf, tp); - netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT); + + if (tp->support_2500full) + netif_napi_add(netdev, &tp->napi, r8152_poll, 256); + else + netif_napi_add(netdev, &tp->napi, r8152_poll, 64); ret = register_netdev(netdev); if (ret != 0) { @@ 
-6870,49 +9572,48 @@ static void rtl8152_disconnect(struct usb_interface *intf) unregister_netdev(tp->netdev); tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); - tp->rtl_ops.unload(tp); + if (tp->rtl_ops.unload) + tp->rtl_ops.unload(tp); rtl8152_release_firmware(tp); free_netdev(tp->netdev); } } -#define REALTEK_USB_DEVICE(vend, prod) \ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ - USB_DEVICE_ID_MATCH_INT_CLASS, \ - .idVendor = (vend), \ - .idProduct = (prod), \ - .bInterfaceClass = USB_CLASS_VENDOR_SPEC \ +#define REALTEK_USB_DEVICE(vend, prod) { \ + USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \ }, \ { \ - .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \ - USB_DEVICE_ID_MATCH_DEVICE, \ - .idVendor = (vend), \ - .idProduct = (prod), \ - .bInterfaceClass = USB_CLASS_COMM, \ - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ - .bInterfaceProtocol = USB_CDC_PROTO_NONE + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \ + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \ +} /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { - {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, - {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, - {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, - {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, - {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, - {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, - {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, - {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, - {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, + /* Realtek */ + REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050), + REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053), + REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152), + REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153), + REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155), + REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156), + + /* Microsoft */ + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab), + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6), + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927), + REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e), + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387), + REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041), + REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff), + REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601), {} }; diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 55a244eca5ca..55025202dc4f 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -629,8 +629,8 @@ static const struct ethtool_ops sierra_net_ethtool_ops = { .get_msglevel = usbnet_get_msglevel, .set_msglevel = 
usbnet_set_msglevel, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 4353b370249f..f8cdabb9ef5a 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -741,8 +741,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = { .set_eeprom = smsc75xx_ethtool_set_eeprom, .get_wol = smsc75xx_ethtool_get_wol, .set_wol = smsc75xx_ethtool_set_wol, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 878557ad03ad..ce29261263cd 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -250,8 +250,8 @@ static const struct ethtool_ops sr9700_ethtool_ops = { .get_eeprom_len = sr9700_get_eeprom_len, .get_eeprom = sr9700_get_eeprom, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static void sr9700_set_multicast(struct net_device *netdev) diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index da56735d7755..a822d81310d5 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -527,8 +527,8 @@ static const struct ethtool_ops sr9800_ethtool_ops = { .get_eeprom_len = sr_get_eeprom_len, .get_eeprom = sr_get_eeprom, .nway_reset = usbnet_nway_reset, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int sr9800_link_reset(struct usbnet *dev) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f4f37ecfed58..ecf62849f4c1 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -944,7 +944,10 @@ EXPORT_SYMBOL_GPL(usbnet_open); * they'll probably want to use this base set. */ -int usbnet_get_link_ksettings(struct net_device *net, +/* These methods are written on the assumption that the device + * uses MII + */ +int usbnet_get_link_ksettings_mii(struct net_device *net, struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); @@ -956,9 +959,30 @@ int usbnet_get_link_ksettings(struct net_device *net, return 0; } -EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings); +EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_mii); + +int usbnet_get_link_ksettings_internal(struct net_device *net, + struct ethtool_link_ksettings *cmd) +{ + struct usbnet *dev = netdev_priv(net); + + /* the assumption that speed is equal on tx and rx + * is deeply engrained into the networking layer. + * For wireless stuff it is not true. + * We assume that rx_speed matters more. 
+ */ + if (dev->rx_speed != SPEED_UNSET) + cmd->base.speed = dev->rx_speed / 1000000; + else if (dev->tx_speed != SPEED_UNSET) + cmd->base.speed = dev->tx_speed / 1000000; + else + cmd->base.speed = SPEED_UNKNOWN; + + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal); -int usbnet_set_link_ksettings(struct net_device *net, +int usbnet_set_link_ksettings_mii(struct net_device *net, const struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); @@ -978,7 +1002,7 @@ int usbnet_set_link_ksettings(struct net_device *net, return retval; } -EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings); +EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii); u32 usbnet_get_link (struct net_device *net) { @@ -1043,8 +1067,8 @@ static const struct ethtool_ops usbnet_ethtool_ops = { .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = usbnet_get_link_ksettings, - .set_link_ksettings = usbnet_set_link_ksettings, + .get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; /*-------------------------------------------------------------------------*/ @@ -1661,6 +1685,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->intf = udev; dev->driver_info = info; dev->driver_name = name; + dev->rx_speed = SPEED_UNSET; + dev->tx_speed = SPEED_UNSET; net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!net->tstats) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 34e49c75db42..bdb7ce3cb054 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -57,6 +57,7 @@ struct veth_rq_stats { struct veth_rq { struct napi_struct xdp_napi; + struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */ struct net_device *dev; struct bpf_prog __rcu *xdp_prog; struct xdp_mem_info xdp_mem; @@ -218,6 +219,17 @@ static void veth_get_ethtool_stats(struct net_device *dev, } } +static void veth_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + channels->tx_count = dev->real_num_tx_queues; + channels->rx_count = dev->real_num_rx_queues; + channels->max_tx = dev->real_num_tx_queues; + channels->max_rx = dev->real_num_rx_queues; + channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues); + channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues); +} + static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, @@ -226,6 +238,7 @@ static const struct ethtool_ops veth_ethtool_ops = { .get_ethtool_stats = veth_get_ethtool_stats, .get_link_ksettings = veth_get_link_ksettings, .get_ts_info = ethtool_op_get_ts_info, + .get_channels = veth_get_channels, }; /* general routines */ @@ -281,13 +294,32 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, netif_rx(skb); } +/* return true if the specified skb has chances of GRO aggregation + * Don't strive for accuracy, but try to avoid GRO overhead in the most + * common scenarios. + * When XDP is enabled, all traffic is considered eligible, as the xmit + * device has TSO off. + * When TSO is enabled on the xmit device, we are likely interested only + * in UDP aggregation, explicitly check for that if the skb is suspected + * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets - + * to belong to locally generated UDP traffic. 
+ */ +static bool veth_skb_is_eligible_for_gro(const struct net_device *dev, + const struct net_device *rcv, + const struct sk_buff *skb) +{ + return !(dev->features & NETIF_F_ALL_TSO) || + (skb->destructor == sock_wfree && + rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)); +} + static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_rq *rq = NULL; struct net_device *rcv; int length = skb->len; - bool rcv_xdp = false; + bool use_napi = false; int rxq; rcu_read_lock(); @@ -301,20 +333,26 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rxq = skb_get_queue_mapping(skb); if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; - rcv_xdp = rcu_access_pointer(rq->xdp_prog); + + /* The napi pointer is available when an XDP program is + * attached or when GRO is enabled + * Don't bother with napi/GRO if the skb can't be aggregated + */ + use_napi = rcu_access_pointer(rq->napi) && + veth_skb_is_eligible_for_gro(dev, rcv, skb); skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); - if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { - if (!rcv_xdp) + if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { + if (!use_napi) dev_lstats_add(dev, length); } else { drop: atomic64_inc(&priv->dropped); } - if (rcv_xdp) + if (use_napi) __veth_xdp_flush(rq); rcu_read_unlock(); @@ -433,7 +471,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n, u32 flags, bool ndo_xmit) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); - int i, ret = -ENXIO, drops = 0; + int i, ret = -ENXIO, nxmit = 0; struct net_device *rcv; unsigned int max_len; struct veth_rq *rq; @@ -448,11 +486,10 @@ static int veth_xdp_xmit(struct net_device *dev, int n, rcv_priv = netdev_priv(rcv); rq = &rcv_priv->rq[veth_select_rxq(rcv)]; - /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive - * side. This means an XDP program is loaded on the peer and the peer - * device is up. + /* The napi pointer is set if NAPI is enabled, which ensures that + * xdp_ring is initialized on receive side and the peer device is up. 
*/ - if (!rcu_access_pointer(rq->xdp_prog)) + if (!rcu_access_pointer(rq->napi)) goto out; max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; @@ -463,21 +500,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n, void *ptr = veth_xdp_to_ptr(frame); if (unlikely(frame->len > max_len || - __ptr_ring_produce(&rq->xdp_ring, ptr))) { - xdp_return_frame_rx_napi(frame); - drops++; - } + __ptr_ring_produce(&rq->xdp_ring, ptr))) + break; + nxmit++; } spin_unlock(&rq->xdp_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __veth_xdp_flush(rq); - ret = n - drops; + ret = nxmit; if (ndo_xmit) { u64_stats_update_begin(&rq->stats.syncp); - rq->stats.vs.peer_tq_xdp_xmit += n - drops; - rq->stats.vs.peer_tq_xdp_xmit_err += drops; + rq->stats.vs.peer_tq_xdp_xmit += nxmit; + rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit; u64_stats_update_end(&rq->stats.syncp); } @@ -504,20 +540,23 @@ static int veth_ndo_xdp_xmit(struct net_device *dev, int n, static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { - int sent, i, err = 0; + int sent, i, err = 0, drops; sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false); if (sent < 0) { err = sent; sent = 0; - for (i = 0; i < bq->count; i++) - xdp_return_frame(bq->q[i]); } - trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err); + + for (i = sent; unlikely(i < bq->count); i++) + xdp_return_frame(bq->q[i]); + + drops = bq->count - sent; + trace_xdp_bulk_tx(rq->dev, sent, drops, err); u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.xdp_tx += sent; - rq->stats.vs.xdp_tx_err += bq->count - sent; + rq->stats.vs.xdp_tx_err += drops; u64_stats_update_end(&rq->stats.syncp); bq->count = 0; @@ -672,7 +711,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, int mac_len, delta, off; struct xdp_buff xdp; - skb_orphan(skb); + skb_orphan_partial(skb); rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -889,7 +928,7 @@ static int veth_poll(struct napi_struct *napi, int budget) return done; } -static int veth_napi_add(struct net_device *dev) +static int __veth_napi_enable(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int err, i; @@ -906,6 +945,7 @@ static int veth_napi_add(struct net_device *dev) struct veth_rq *rq = &priv->rq[i]; napi_enable(&rq->xdp_napi); + rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); } return 0; @@ -924,6 +964,7 @@ static void veth_napi_del(struct net_device *dev) for (i = 0; i < dev->real_num_rx_queues; i++) { struct veth_rq *rq = &priv->rq[i]; + rcu_assign_pointer(priv->rq[i].napi, NULL); napi_disable(&rq->xdp_napi); __netif_napi_del(&rq->xdp_napi); } @@ -937,8 +978,14 @@ static void veth_napi_del(struct net_device *dev) } } +static bool veth_gro_requested(const struct net_device *dev) +{ + return !!(dev->wanted_features & NETIF_F_GRO); +} + static int veth_enable_xdp(struct net_device *dev) { + bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); struct veth_priv *priv = netdev_priv(dev); int err, i; @@ -946,7 +993,8 @@ static int veth_enable_xdp(struct net_device *dev) for (i = 0; i < dev->real_num_rx_queues; i++) { struct veth_rq *rq = &priv->rq[i]; - netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + if (!napi_already_on) + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); if (err < 0) goto err_rxq_reg; @@ -961,13 +1009,25 @@ static int veth_enable_xdp(struct net_device *dev) rq->xdp_mem = rq->xdp_rxq.mem; } - err = veth_napi_add(dev); - 
if (err) - goto err_rxq_reg; + if (!napi_already_on) { + err = __veth_napi_enable(dev); + if (err) + goto err_rxq_reg; + + if (!veth_gro_requested(dev)) { + /* user-space did not require GRO, but adding XDP + * is supposed to get GRO working + */ + dev->features |= NETIF_F_GRO; + netdev_features_change(dev); + } + } } - for (i = 0; i < dev->real_num_rx_queues; i++) + for (i = 0; i < dev->real_num_rx_queues; i++) { rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); + rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); + } return 0; err_reg_mem: @@ -977,7 +1037,8 @@ err_rxq_reg: struct veth_rq *rq = &priv->rq[i]; xdp_rxq_info_unreg(&rq->xdp_rxq); - netif_napi_del(&rq->xdp_napi); + if (!napi_already_on) + netif_napi_del(&rq->xdp_napi); } return err; @@ -990,7 +1051,19 @@ static void veth_disable_xdp(struct net_device *dev) for (i = 0; i < dev->real_num_rx_queues; i++) rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); - veth_napi_del(dev); + + if (!netif_running(dev) || !veth_gro_requested(dev)) { + veth_napi_del(dev); + + /* if user-space did not require GRO, since adding XDP + * enabled it, clear it now + */ + if (!veth_gro_requested(dev) && netif_running(dev)) { + dev->features &= ~NETIF_F_GRO; + netdev_features_change(dev); + } + } + for (i = 0; i < dev->real_num_rx_queues; i++) { struct veth_rq *rq = &priv->rq[i]; @@ -999,6 +1072,29 @@ static void veth_disable_xdp(struct net_device *dev) } } +static int veth_napi_enable(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + int err, i; + + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + } + + err = __veth_napi_enable(dev); + if (err) { + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + netif_napi_del(&rq->xdp_napi); + } + return err; + } + return err; +} + static int veth_open(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); @@ -1012,6 +1108,10 @@ static int veth_open(struct net_device *dev) err = veth_enable_xdp(dev); if (err) return err; + } else if (veth_gro_requested(dev)) { + err = veth_napi_enable(dev); + if (err) + return err; } if (peer->flags & IFF_UP) { @@ -1033,6 +1133,8 @@ static int veth_close(struct net_device *dev) if (priv->_xdp_prog) veth_disable_xdp(dev); + else if (veth_gro_requested(dev)) + veth_napi_del(dev); return 0; } @@ -1131,10 +1233,32 @@ static netdev_features_t veth_fix_features(struct net_device *dev, if (peer_priv->_xdp_prog) features &= ~NETIF_F_GSO_SOFTWARE; } + if (priv->_xdp_prog) + features |= NETIF_F_GRO; return features; } +static int veth_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ dev->features; + struct veth_priv *priv = netdev_priv(dev); + int err; + + if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog) + return 0; + + if (features & NETIF_F_GRO) { + err = veth_napi_enable(dev); + if (err) + return err; + } else { + veth_napi_del(dev); + } + return 0; +} + static void veth_set_rx_headroom(struct net_device *dev, int new_hr) { struct veth_priv *peer_priv, *priv = netdev_priv(dev); @@ -1253,6 +1377,7 @@ static const struct net_device_ops veth_netdev_ops = { #endif .ndo_get_iflink = veth_get_iflink, .ndo_fix_features = veth_fix_features, + .ndo_set_features = veth_set_features, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_bpf = veth_xdp, @@ -1315,6 +1440,13 @@ 
static int veth_validate(struct nlattr *tb[], struct nlattr *data[], static struct rtnl_link_ops veth_link_ops; +static void veth_disable_gro(struct net_device *dev) +{ + dev->features &= ~NETIF_F_GRO; + dev->wanted_features &= ~NETIF_F_GRO; + netdev_update_features(dev); +} + static int veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -1387,6 +1519,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto err_register_peer; + /* keep GRO disabled by default to be consistent with the established + * veth behavior + */ + veth_disable_gro(peer); netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp); @@ -1424,6 +1560,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, priv = netdev_priv(peer); rcu_assign_pointer(priv->peer, dev); + veth_disable_gro(dev); return 0; err_register_dev: diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 0824e6999e49..7fda2ae4c40f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -195,6 +195,9 @@ struct virtnet_info { /* # of XDP queue pairs currently used by the driver */ u16 xdp_queue_pairs; + /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */ + bool xdp_enabled; + /* I like... big packets and I cannot lie! */ bool big_packets; @@ -376,21 +379,18 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, - bool hdr_valid, unsigned int metasize) + bool hdr_valid, unsigned int metasize, + unsigned int headroom) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; unsigned int copy, hdr_len, hdr_padded_len; - char *p; + struct page *page_to_free = NULL; + int tailroom, shinfo_size; + char *p, *hdr_p, *buf; p = page_address(page) + offset; - - /* copy small packet so we can reuse these pages for small data */ - skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); - if (unlikely(!skb)) - return NULL; - - hdr = skb_vnet_hdr(skb); + hdr_p = p; hdr_len = vi->hdr_len; if (vi->mergeable_rx_bufs) @@ -398,14 +398,44 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - /* hdr_valid means no XDP, so we can copy the vnet header */ - if (hdr_valid) - memcpy(hdr, p, hdr_len); + /* If headroom is not 0, there is an offset between the beginning of the + * data and the allocated space, otherwise the data and the allocated + * space are aligned. + */ + if (headroom) { + /* Buffers with headroom use PAGE_SIZE as alloc size, + * see add_recvbuf_mergeable() + get_mergeable_buf_len() + */ + truesize = PAGE_SIZE; + tailroom = truesize - len - offset; + buf = page_address(page); + } else { + tailroom = truesize - len; + buf = p; + } len -= hdr_len; offset += hdr_padded_len; p += hdr_padded_len; + shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + /* copy small packet so we can reuse these pages */ + if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) { + skb = build_skb(buf, truesize); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, p - buf); + skb_put(skb, len); + goto ok; + } + + /* copy small packet so we can reuse these pages for small data */ + skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); + if (unlikely(!skb)) + return NULL; + /* Copy all frame if it fits skb->head, otherwise * we let virtio_net_hdr_to_skb() and GRO pull headers as needed. 
*/ @@ -415,11 +445,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, copy = ETH_HLEN + metasize; skb_put_data(skb, p, copy); - if (metasize) { - __skb_pull(skb, metasize); - skb_metadata_set(skb, metasize); - } - len -= copy; offset += copy; @@ -427,8 +452,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, if (len) skb_add_rx_frag(skb, 0, page, offset, len, truesize); else - put_page(page); - return skb; + page_to_free = page; + goto ok; } /* @@ -455,6 +480,20 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, if (page) give_pages(rq, page); +ok: + /* hdr_valid means no XDP, so we can copy the vnet header */ + if (hdr_valid) { + hdr = skb_vnet_hdr(skb); + memcpy(hdr, hdr_p, hdr_len); + } + if (page_to_free) + put_page(page_to_free); + + if (metasize) { + __skb_pull(skb, metasize); + skb_metadata_set(skb, metasize); + } + return skb; } @@ -485,12 +524,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, return 0; } -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi) -{ - unsigned int qp; - - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - return &vi->sq[qp]; +/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on + * the current cpu, so it does not need to be locked. + * + * Here we use marco instead of inline functions because we have to deal with + * three issues at the same time: 1. the choice of sq. 2. judge and execute the + * lock/unlock of txq 3. make sparse happy. It is difficult for two inline + * functions to perfectly solve these three problems at the same time. + */ +#define virtnet_xdp_get_sq(vi) ({ \ + struct netdev_queue *txq; \ + typeof(vi) v = (vi); \ + unsigned int qp; \ + \ + if (v->curr_queue_pairs > nr_cpu_ids) { \ + qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ + qp += smp_processor_id(); \ + txq = netdev_get_tx_queue(v->dev, qp); \ + __netif_tx_acquire(txq); \ + } else { \ + qp = smp_processor_id() % v->curr_queue_pairs; \ + txq = netdev_get_tx_queue(v->dev, qp); \ + __netif_tx_lock(txq, raw_smp_processor_id()); \ + } \ + v->sq + qp; \ +}) + +#define virtnet_xdp_put_sq(vi, q) { \ + struct netdev_queue *txq; \ + typeof(vi) v = (vi); \ + \ + txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \ + if (v->curr_queue_pairs > nr_cpu_ids) \ + __netif_tx_release(txq); \ + else \ + __netif_tx_unlock(txq); \ } static int virtnet_xdp_xmit(struct net_device *dev, @@ -503,10 +571,10 @@ static int virtnet_xdp_xmit(struct net_device *dev, unsigned int len; int packets = 0; int bytes = 0; - int drops = 0; + int nxmit = 0; int kicks = 0; - int ret, err; void *ptr; + int ret; int i; /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this @@ -516,11 +584,10 @@ static int virtnet_xdp_xmit(struct net_device *dev, if (!xdp_prog) return -ENXIO; - sq = virtnet_xdp_sq(vi); + sq = virtnet_xdp_get_sq(vi); if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { ret = -EINVAL; - drops = n; goto out; } @@ -543,13 +610,11 @@ static int virtnet_xdp_xmit(struct net_device *dev, for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; - err = __virtnet_xdp_xmit_one(vi, sq, xdpf); - if (err) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) + break; + nxmit++; } - ret = n - drops; + ret = nxmit; if (flags & XDP_XMIT_FLUSH) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) @@ -560,16 +625,17 @@ out: sq->stats.bytes += bytes; sq->stats.packets += packets; sq->stats.xdp_tx += n; - sq->stats.xdp_tx_drops += drops; + 
sq->stats.xdp_tx_drops += n - nxmit; sq->stats.kicks += kicks; u64_stats_update_end(&sq->stats.syncp); + virtnet_xdp_put_sq(vi, sq); return ret; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { - return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; + return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; } /* We copy the packet for XDP in the following cases: @@ -713,7 +779,9 @@ static struct sk_buff *receive_small(struct net_device *dev, if (unlikely(!xdpf)) goto err_xdp; err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); - if (unlikely(err < 0)) { + if (unlikely(!err)) { + xdp_return_frame_rx_napi(xdpf); + } else if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } @@ -776,7 +844,7 @@ static struct sk_buff *receive_big(struct net_device *dev, { struct page *page = buf; struct sk_buff *skb = - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0); + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -890,7 +958,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, offset, len, PAGE_SIZE, false, - metasize); + metasize, headroom); return head_skb; } break; @@ -900,7 +968,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(!xdpf)) goto err_xdp; err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); - if (unlikely(err < 0)) { + if (unlikely(!err)) { + xdp_return_frame_rx_napi(xdpf); + } else if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) put_page(xdp_page); @@ -946,7 +1016,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, - metasize); + metasize, headroom); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -1462,12 +1532,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) xdp_do_flush(); if (xdp_xmit & VIRTIO_XDP_TX) { - sq = virtnet_xdp_sq(vi); + sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); } + virtnet_xdp_put_sq(vi, sq); } return received; @@ -1985,7 +2056,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi) } virtqueue_set_affinity(vi->rq[i].vq, mask); virtqueue_set_affinity(vi->sq[i].vq, mask); - __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); + __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); cpumask_clear(mask); } @@ -2108,25 +2179,21 @@ static int virtnet_set_channels(struct net_device *dev, static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct virtnet_info *vi = netdev_priv(dev); - char *p = (char *)data; unsigned int i, j; + u8 *p = data; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < vi->curr_queue_pairs; i++) { - for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", - i, virtnet_rq_stats_desc[j].desc); - p += ETH_GSTRING_LEN; - } + for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) + ethtool_sprintf(&p, "rx_queue_%u_%s", i, + virtnet_rq_stats_desc[j].desc); } for (i = 0; i < vi->curr_queue_pairs; i++) { - for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", - i, virtnet_sq_stats_desc[j].desc); - p += ETH_GSTRING_LEN; - } + for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) + ethtool_sprintf(&p, "tx_queue_%u_%s", i, + 
virtnet_sq_stats_desc[j].desc); } break; } @@ -2422,10 +2489,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { - NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); - netdev_warn(dev, "request %i queues but max is %i\n", + netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", curr_qp + xdp_qp, vi->max_queue_pairs); - return -ENOMEM; + xdp_qp = 0; } old_prog = rtnl_dereference(vi->rq[0].xdp_prog); @@ -2459,11 +2525,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, vi->xdp_queue_pairs = xdp_qp; if (prog) { + vi->xdp_enabled = true; for (i = 0; i < vi->max_queue_pairs; i++) { rcu_assign_pointer(vi->rq[i].xdp_prog, prog); if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); } + } else { + vi->xdp_enabled = false; } for (i = 0; i < vi->max_queue_pairs; i++) { @@ -2531,7 +2600,7 @@ static int virtnet_set_features(struct net_device *dev, int err; if ((dev->features ^ features) & NETIF_F_LRO) { - if (vi->xdp_queue_pairs) + if (vi->xdp_enabled) return -EBUSY; if (features & NETIF_F_LRO) @@ -2977,7 +3046,8 @@ static int virtnet_probe(struct virtio_device *vdev) return -ENOMEM; /* Set up network device as normal. */ - dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | + IFF_TX_SKB_NO_LINEAR; dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 7ec8652f2c26..c0bd9cbc43b1 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -218,43 +218,28 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) { - struct vmxnet3_adapter *adapter = netdev_priv(netdev); - if (stringset == ETH_SS_STATS) { - int i, j; - for (j = 0; j < adapter->num_tx_queues; j++) { - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { - memcpy(buf, vmxnet3_tq_dev_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); - i++) { - memcpy(buf, vmxnet3_tq_driver_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - } + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int i, j; - for (j = 0; j < adapter->num_rx_queues; j++) { - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { - memcpy(buf, vmxnet3_rq_dev_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); - i++) { - memcpy(buf, vmxnet3_rq_driver_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - } + if (stringset != ETH_SS_STATS) + return; - for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { - memcpy(buf, vmxnet3_global_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } + for (j = 0; j < adapter->num_tx_queues; j++) { + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) + ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc); + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) + ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc); + } + + for (j = 0; j < adapter->num_rx_queues; j++) { + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) + ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc); + for (i = 0; i < 
ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) + ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc); } + + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) + ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc); } netdev_features_t vmxnet3_fix_features(struct net_device *netdev, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 53dbc67e8a34..02a14f1b938a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3494,6 +3494,7 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, if (err < 0) return ERR_PTR(err); + udp_allow_gso(sock->sk); return sock; } @@ -3713,6 +3714,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, #if IS_ENABLED(CONFIG_IPV6) if (use_ipv6) { struct inet6_dev *idev = __in6_dev_get(lowerdev); + if (idev && idev->cnf.disable_ipv6) { NL_SET_ERR_MSG(extack, "IPv6 support disabled by administrator"); diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 686a25d3b512..5de71e44fc5a 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -573,7 +573,7 @@ static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q); static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q); static struct fst_card_info *fst_card_array[FST_MAX_CARDS]; -static spinlock_t fst_work_q_lock; +static DEFINE_SPINLOCK(fst_work_q_lock); static u64 fst_work_txq; static u64 fst_work_intq; @@ -2648,7 +2648,6 @@ fst_init(void) for (i = 0; i < FST_MAX_CARDS; i++) fst_card_array[i] = NULL; - spin_lock_init(&fst_work_q_lock); return pci_register_driver(&fst_driver); } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 5a6a945f6c81..ba8c36c7ea91 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -25,6 +25,8 @@ struct x25_state { x25_hdlc_proto settings; bool up; spinlock_t up_lock; /* Protects "up" */ + struct sk_buff_head rx_queue; + struct tasklet_struct rx_tasklet; }; static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); @@ -34,14 +36,27 @@ static struct x25_state *state(hdlc_device *hdlc) return hdlc->state; } +static void x25_rx_queue_kick(struct tasklet_struct *t) +{ + struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet); + struct sk_buff *skb = skb_dequeue(&x25st->rx_queue); + + while (skb) { + netif_receive_skb_core(skb); + skb = skb_dequeue(&x25st->rx_queue); + } +} + /* These functions are callbacks called by LAPB layer */ static void x25_connect_disconnect(struct net_device *dev, int reason, int code) { + struct x25_state *x25st = state(dev_to_hdlc(dev)); struct sk_buff *skb; unsigned char *ptr; - if ((skb = dev_alloc_skb(1)) == NULL) { + skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); + if (!skb) { netdev_err(dev, "out of memory\n"); return; } @@ -50,7 +65,9 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code) *ptr = code; skb->protocol = x25_type_trans(skb, dev); - netif_rx(skb); + + skb_queue_tail(&x25st->rx_queue, skb); + tasklet_schedule(&x25st->rx_tasklet); } @@ -71,6 +88,7 @@ static void x25_disconnected(struct net_device *dev, int reason) static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { + struct x25_state *x25st = state(dev_to_hdlc(dev)); unsigned char *ptr; if (skb_cow(skb, 1)) { @@ -84,7 +102,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) *ptr = X25_IFACE_DATA; skb->protocol = x25_type_trans(skb, dev); - return netif_rx(skb); + + skb_queue_tail(&x25st->rx_queue, skb); + tasklet_schedule(&x25st->rx_tasklet); + return 
NET_RX_SUCCESS; } @@ -223,6 +244,7 @@ static void x25_close(struct net_device *dev) spin_unlock_bh(&x25st->up_lock); lapb_unregister(dev); + tasklet_kill(&x25st->rx_tasklet); } @@ -338,6 +360,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); state(hdlc)->up = false; spin_lock_init(&state(hdlc)->up_lock); + skb_queue_head_init(&state(hdlc)->rx_queue); + tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick); /* There's no header_ops so hard_header_len should be 0. */ dev->hard_header_len = 0; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index c3372498f4f1..59646865a3a4 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -51,6 +51,10 @@ struct lapbethdev { struct list_head node; struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* lapbeth device (lapb#) */ + bool up; + spinlock_t up_lock; /* Protects "up" */ + struct sk_buff_head rx_queue; + struct napi_struct napi; }; static LIST_HEAD(lapbeth_devices); @@ -81,6 +85,26 @@ static __inline__ int dev_is_ethdev(struct net_device *dev) /* ------------------------------------------------------------------------ */ +static int lapbeth_napi_poll(struct napi_struct *napi, int budget) +{ + struct lapbethdev *lapbeth = container_of(napi, struct lapbethdev, + napi); + struct sk_buff *skb; + int processed = 0; + + for (; processed < budget; ++processed) { + skb = skb_dequeue(&lapbeth->rx_queue); + if (!skb) + break; + netif_receive_skb_core(skb); + } + + if (processed < budget) + napi_complete(napi); + + return processed; +} + /* * Receive a LAPB frame via an ethernet interface. */ @@ -101,8 +125,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe rcu_read_lock(); lapbeth = lapbeth_get_x25_dev(dev); if (!lapbeth) - goto drop_unlock; - if (!netif_running(lapbeth->axdev)) + goto drop_unlock_rcu; + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256; @@ -117,11 +142,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe goto drop_unlock; } out: + spin_unlock_bh(&lapbeth->up_lock); rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto out; +drop_unlock_rcu: + rcu_read_unlock(); drop: kfree_skb(skb); return 0; @@ -129,6 +157,7 @@ drop: static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { + struct lapbethdev *lapbeth = netdev_priv(dev); unsigned char *ptr; if (skb_cow(skb, 1)) { @@ -142,7 +171,10 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) *ptr = X25_IFACE_DATA; skb->protocol = x25_type_trans(skb, dev); - return netif_rx(skb); + + skb_queue_tail(&lapbeth->rx_queue, skb); + napi_schedule(&lapbeth->napi); + return NET_RX_SUCCESS; } /* @@ -151,13 +183,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; - /* - * Just to be *really* sure not to send anything if the interface - * is down, the ethernet device may have gone. - */ - if (!netif_running(dev)) + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) goto drop; /* There should be a pseudo header of 1 byte added by upper layers. 
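For reference, the queue-and-defer receive pattern that hdlc_x25 adopts in the hunks above (and that lapbether implements with NAPI instead of a tasklet) reduces to the sketch below; the names are illustrative only and are not the drivers' actual symbols:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct deferred_rx {
	struct sk_buff_head rx_queue;     /* skbs queued from the LAPB callbacks */
	struct tasklet_struct rx_tasklet; /* delivers them from softirq context */
};

/* tasklet body: drain the queue outside whatever locks the caller held */
static void deferred_rx_kick(struct tasklet_struct *t)
{
	struct deferred_rx *drx = from_tasklet(drx, t, rx_tasklet);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&drx->rx_queue)) != NULL)
		netif_receive_skb_core(skb);
}

static void deferred_rx_init(struct deferred_rx *drx)
{
	skb_queue_head_init(&drx->rx_queue);
	tasklet_setup(&drx->rx_tasklet, deferred_rx_kick);
}

/* called where the old code called netif_rx() directly */
static void deferred_rx_receive(struct deferred_rx *drx, struct sk_buff *skb)
{
	skb_queue_tail(&drx->rx_queue, skb);
	tasklet_schedule(&drx->rx_tasklet);
}

lapbether does the same thing but drains its rx_queue from a NAPI poll handler with a budget, which is why lapbeth_open() and lapbeth_close() bracket the path with napi_enable() and napi_disable().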
@@ -194,6 +224,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, goto drop; } out: + spin_unlock_bh(&lapbeth->up_lock); return NETDEV_TX_OK; drop: kfree_skb(skb); @@ -228,8 +259,9 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) static void lapbeth_connected(struct net_device *dev, int reason) { + struct lapbethdev *lapbeth = netdev_priv(dev); unsigned char *ptr; - struct sk_buff *skb = dev_alloc_skb(1); + struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!skb) { pr_err("out of memory\n"); @@ -240,13 +272,16 @@ static void lapbeth_connected(struct net_device *dev, int reason) *ptr = X25_IFACE_CONNECT; skb->protocol = x25_type_trans(skb, dev); - netif_rx(skb); + + skb_queue_tail(&lapbeth->rx_queue, skb); + napi_schedule(&lapbeth->napi); } static void lapbeth_disconnected(struct net_device *dev, int reason) { + struct lapbethdev *lapbeth = netdev_priv(dev); unsigned char *ptr; - struct sk_buff *skb = dev_alloc_skb(1); + struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!skb) { pr_err("out of memory\n"); @@ -257,7 +292,9 @@ static void lapbeth_disconnected(struct net_device *dev, int reason) *ptr = X25_IFACE_DISCONNECT; skb->protocol = x25_type_trans(skb, dev); - netif_rx(skb); + + skb_queue_tail(&lapbeth->rx_queue, skb); + napi_schedule(&lapbeth->napi); } /* @@ -285,23 +322,37 @@ static const struct lapb_register_struct lapbeth_callbacks = { */ static int lapbeth_open(struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; + napi_enable(&lapbeth->napi); + if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) { pr_err("lapb_register error: %d\n", err); return -ENODEV; } + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = true; + spin_unlock_bh(&lapbeth->up_lock); + return 0; } static int lapbeth_close(struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = false; + spin_unlock_bh(&lapbeth->up_lock); + if ((err = lapb_unregister(dev)) != LAPB_OK) pr_err("lapb_unregister error: %d\n", err); + napi_disable(&lapbeth->napi); + return 0; } @@ -356,6 +407,12 @@ static int lapbeth_new_device(struct net_device *dev) dev_hold(dev); lapbeth->ethdev = dev; + lapbeth->up = false; + spin_lock_init(&lapbeth->up_lock); + + skb_queue_head_init(&lapbeth->rx_queue); + netif_napi_add(ndev, &lapbeth->napi, lapbeth_napi_poll, 16); + rc = -EIO; if (register_netdevice(ndev)) goto fail; @@ -403,8 +460,8 @@ static int lapbeth_device_event(struct notifier_block *this, if (lapbeth_get_x25_dev(dev) == NULL) lapbeth_new_device(dev); break; - case NETDEV_DOWN: - /* ethernet device closed -> close LAPB interface */ + case NETDEV_GOING_DOWN: + /* ethernet device closes -> close LAPB interface */ lapbeth = lapbeth_get_x25_dev(dev); if (lapbeth) dev_close(lapbeth->axdev); diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h index 1081d171e477..462cb620bc5d 100644 --- a/drivers/net/wan/z85230.h +++ b/drivers/net/wan/z85230.h @@ -327,45 +327,6 @@ struct z8530_channel void *private; /* For our owner */ struct net_device *netdevice; /* Network layer device */ - /* - * Async features - */ - - struct tty_struct *tty; /* Attached terminal */ - int line; /* Minor number */ - wait_queue_head_t open_wait; /* Tasks waiting to open */ - wait_queue_head_t close_wait; /* and for close to end */ - unsigned long event; /* Pending events */ - int fdcount; /* # of fd on device */ - int blocked_open; /* # of blocked opens */ - 
int x_char; /* XON/XOF char */ - unsigned char *xmit_buf; /* Transmit pointer */ - int xmit_head; /* Transmit ring */ - int xmit_tail; - int xmit_cnt; - int flags; - int timeout; - int xmit_fifo_size; /* Transmit FIFO info */ - - int close_delay; /* Do we wait for drain on close ? */ - unsigned short closing_wait; - - /* We need to know the current clock divisor - * to read the bps rate the chip has currently - * loaded. - */ - - unsigned char clk_divisor; /* May be 1, 16, 32, or 64 */ - int zs_baud; - - int magic; - int baud_base; /* Baud parameters */ - int custom_divisor; - - - unsigned char tx_active; /* character is being xmitted */ - unsigned char tx_stopped; /* output is suspended */ - spinlock_t *lock; /* Device lock */ }; diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 0a37be6a7d33..fab398046a3f 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep, ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx status %d eid %d req count %d count %d len %d\n", - ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len); + ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len); return ret; } diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index d66593f0950f..ea00fbb15601 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1759,17 +1759,11 @@ err_core_destroy: return ret; } -static int ath10k_snoc_remove(struct platform_device *pdev) +static int ath10k_snoc_free_resources(struct ath10k *ar) { - struct ath10k *ar = platform_get_drvdata(pdev); struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); - - reinit_completion(&ar->driver_recovery); - - if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) - wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ); + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n"); set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags); @@ -1783,12 +1777,29 @@ static int ath10k_snoc_remove(struct platform_device *pdev) return 0; } +static int ath10k_snoc_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + + reinit_completion(&ar->driver_recovery); + + if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ); + + ath10k_snoc_free_resources(ar); + + return 0; +} + static void ath10k_snoc_shutdown(struct platform_device *pdev) { struct ath10k *ar = platform_get_drvdata(pdev); ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n"); - ath10k_snoc_remove(pdev); + ath10k_snoc_free_resources(ar); } static struct platform_driver ath10k_snoc_driver = { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index d97b33f789e4..7efbe03fbca8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb) GFP_ATOMIC ); break; + default: + kfree(tb); + return; } exit: diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index d4ef45cd0685..8c9c781afc3e 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ 
b/drivers/net/wireless/ath/ath11k/ahb.c @@ -373,7 +373,7 @@ static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) cfg->tgt_ce = ab->hw_params.target_ce_config; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; - ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074; + ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; } static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index 987c65010272..de8b632b058c 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -187,6 +187,59 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = { }; +const struct ce_attr ath11k_host_ce_config_qcn9074[] = { + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 32, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* CE3: host->target WMI (mac0) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 2048, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE5: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, + }, +}; + static bool ath11k_ce_need_shadow_fix(int ce_id) { /* only ce4 needs shadow workaroud*/ @@ -455,7 +508,7 @@ static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_ struct hal_srng_params *ring_params) { u32 msi_data_start; - u32 msi_data_count; + u32 msi_data_count, msi_data_idx; u32 msi_irq_start; u32 addr_lo; u32 addr_hi; @@ -469,10 +522,11 @@ static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_ return; ath11k_get_msi_address(ab, &addr_lo, &addr_hi); + ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx); ring_params->msi_addr = addr_lo; ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); - ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start; + ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start; ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; } diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index d6eeef919349..713f766cac22 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -173,6 +173,7 @@ struct ath11k_ce { extern const struct ce_attr ath11k_host_ce_config_ipq8074[]; extern const struct ce_attr ath11k_host_ce_config_qca6390[]; +extern const struct ce_attr ath11k_host_ce_config_qcn9074[]; void ath11k_ce_cleanup_pipes(struct ath11k_base *ab); void ath11k_ce_rx_replenish_retry(struct timer_list *t); diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 350b7913622c..77ce3347ab86 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -45,6 +45,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { 
.ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, @@ -68,6 +69,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .idle_ps = false, .cold_boot_calib = true, .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -83,6 +85,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, @@ -106,6 +109,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .idle_ps = false, .cold_boot_calib = true, .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), }, { .name = "qca6390 hw2.0", @@ -121,6 +125,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &qca6390_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, @@ -143,6 +148,44 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .idle_ps = true, .cold_boot_calib = false, .supports_suspend = true, + .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), + }, + { + .name = "qcn9074 hw1.0", + .hw_rev = ATH11K_HW_QCN9074_HW10, + .fw = { + .dir = "QCN9074/hw1.0", + .board_size = 256 * 1024, + .cal_size = 256 * 1024, + }, + .max_radios = 1, + .single_pdev_only = false, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074, + .hw_ops = &qcn9074_ops, + .ring_mask = &ath11k_hw_ring_mask_qcn9074, + .internal_sleep_clock = false, + .regs = &qcn9074_regs, + .host_ce_config = ath11k_host_ce_config_qcn9074, + .ce_count = 6, + .target_ce_config = ath11k_target_ce_config_wlan_qcn9074, + .target_ce_count = 9, + .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074, + .svc_to_ce_map_len = 18, + .rxdma1_enable = true, + .num_rxmda_per_pdev = 1, + .rx_mac_buf_ring = false, + .vdev_start_delay = false, + .htt_peer_map_v2 = true, + .tcl_0_only = false, + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), + .supports_monitor = true, + .supports_shadow_regs = false, + .idle_ps = false, + .cold_boot_calib = false, + .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), }, }; @@ -974,7 +1017,7 @@ static int ath11k_init_hw_params(struct ath11k_base *ab) ab->hw_params = *hw_params; - ath11k_dbg(ab, ATH11K_DBG_BOOT, "Hardware name %s\n", ab->hw_params.name); + ath11k_info(ab, "%s\n", ab->hw_params.name); return 0; } diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 8d29845774df..55af982deca7 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -34,6 +34,7 @@ #define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) #define ATH11K_INVALID_HW_MAC_ID 0xFF +#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ) extern unsigned int ath11k_frame_mode; @@ -105,6 +106,7 @@ enum ath11k_hw_rev { 
ATH11K_HW_IPQ8074, ATH11K_HW_QCA6390_HW20, ATH11K_HW_IPQ6018_HW10, + ATH11K_HW_QCN9074_HW10, }; enum ath11k_firmware_mode { @@ -234,6 +236,7 @@ struct ath11k_vif { u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; + struct delayed_work connection_loss_work; int num_legacy_stations; int rtscts_prot_mode; int txpower; @@ -607,6 +610,7 @@ struct ath11k_bus_params { bool m3_fw_support; bool fixed_bdf_addr; bool fixed_mem_region; + bool static_window_map; }; /* IPQ8074 HW channel counters frequency value in hertz */ @@ -876,6 +880,8 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018 extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[]; +extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[]; +extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[]; int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab); int ath11k_core_pre_init(struct ath11k_base *ab); int ath11k_core_init(struct ath11k_base *ath11k); diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c index e13684343ec3..ec93f14e6d2a 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c @@ -3851,7 +3851,7 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf, htt_stats_buf->num_non_srg_ppdu_tried); len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n", htt_stats_buf->num_non_srg_ppdu_success); - len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunies = %u\n", + len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n", htt_stats_buf->num_srg_opportunities); len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n", htt_stats_buf->num_srg_ppdu_tried); diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 850ad38b888f..1d9aa1bb6b6e 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -20,95 +20,102 @@ #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) -static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) +static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) { - return desc->hdr_status; + return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); } -static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) +static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - if (!(__le32_to_cpu(desc->mpdu_start.info1) & - RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) + if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; - return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, - __le32_to_cpu(desc->mpdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, - __le32_to_cpu(desc->msdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return 
FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, - __le32_to_cpu(desc->msdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); } -static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, - __le32_to_cpu(desc->mpdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } -static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, - __le32_to_cpu(desc->mpdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); } -static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb) +static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return ieee80211_has_morefrags(hdr->frame_control); } -static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb) +static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } -static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc) +static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, - __le32_to_cpu(desc->mpdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); } -static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) +static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params.hw_ops->rx_desc_get_attention(desc); +} + +static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, - __le32_to_cpu(desc->attention.info2)); + __le32_to_cpu(attn->info2)); } -static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, - __le32_to_cpu(desc->attention.info1)); + __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, - __le32_to_cpu(desc->attention.info1)); + __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) { return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, - __le32_to_cpu(desc->attention.info2)) == + __le32_to_cpu(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK); } -static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) +static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn) { - u32 info = __le32_to_cpu(desc->attention.info1); + u32 info = __le32_to_cpu(attn->info1); u32 errmap = 0; if (info & RX_ATTENTION_INFO1_FCS_ERR) @@ 
-135,131 +142,122 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) return errmap; } -static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) +static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, - __le32_to_cpu(desc->msdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); } -static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_SGI, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); } -static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) +static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return __le32_to_cpu(desc->msdu_start.phy_meta_data); + return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); } -static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, - __le32_to_cpu(desc->msdu_start.info3)); - - return hweight8(mimo_ss_bitmap); + return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); } -static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MPDU_START_INFO2_TID, - __le32_to_cpu(desc->mpdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); } -static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc) +static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return __le16_to_cpu(desc->mpdu_start.sw_peer_id); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); } -static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, - __le32_to_cpu(desc->msdu_end.info2)); + return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); } -static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, - 
__le32_to_cpu(desc->msdu_end.info2)); + return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); } -static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, - __le32_to_cpu(desc->msdu_end.info2)); + return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc); } -static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, +static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, + struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { - memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, - sizeof(struct rx_msdu_end)); - memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, - sizeof(struct rx_attention)); - memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, - sizeof(struct rx_mpdu_end)); + ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); } -static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) +static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) { - struct rx_attention *rx_attn; - - rx_attn = &rx_desc->attention; - return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, - __le32_to_cpu(rx_attn->info1)); + __le32_to_cpu(attn->info1)); } -static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) -{ - struct rx_msdu_start *rx_msdu_start; - - rx_msdu_start = &rx_desc->msdu_start; - - return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, - __le32_to_cpu(rx_msdu_start->info2)); -} - -static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) +static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u8 *rx_pkt_hdr; - rx_pkt_hdr = &rx_desc->msdu_payload[0]; + rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc); return rx_pkt_hdr; } -static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) +static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u32 tlv_tag; - tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, - __le32_to_cpu(rx_desc->mpdu_start_tag)); + tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc); return tlv_tag == HAL_RX_MPDU_START; } -static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) +static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) +{ + return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); +} + +static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc, + u16 len) { - return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); + ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); } static void ath11k_dp_service_mon_ring(struct timer_list *t) @@ -1722,19 +1720,19 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, struct sk_buff *first, struct sk_buff *last, u8 l3pad_bytes, int msdu_len) { + struct ath11k_base *ab = ar->ab; struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); int buf_first_hdr_len, buf_first_len; struct hal_rx_desc *ldesc; - int space_extra; - int rem_len; - int buf_len; + int space_extra, rem_len, buf_len; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; /* As the msdu is spread across multiple rx buffers, * find the offset to the start of msdu for computing * the length of the msdu in the first buffer. 
*/ - buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; + buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { @@ -1744,8 +1742,8 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, } ldesc = (struct hal_rx_desc *)last->data; - rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); - rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc); /* MSDU spans over multiple buffers because the length of the MSDU * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data @@ -1757,7 +1755,7 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, /* When an MSDU spread over multiple buffers attention, MSDU_END and * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. */ - ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); + ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); if (space_extra > 0 && @@ -1778,18 +1776,18 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { rxcb = ATH11K_SKB_RXCB(skb); if (rxcb->is_continuation) - buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; + buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; else buf_len = rem_len; - if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { + if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { WARN_ON_ONCE(1); dev_kfree_skb_any(skb); return -EINVAL; } - skb_put(skb, buf_len + HAL_RX_DESC_SIZE); - skb_pull(skb, HAL_RX_DESC_SIZE); + skb_put(skb, buf_len + hal_rx_desc_sz); + skb_pull(skb, hal_rx_desc_sz); skb_copy_from_linear_data(skb, skb_put(first, buf_len), buf_len); dev_kfree_skb_any(skb); @@ -1820,13 +1818,15 @@ static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_ return NULL; } -static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) +static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + struct rx_attention *rx_attention; bool ip_csum_fail, l4_csum_fail; - ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); - l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); + rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc); + ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention); + l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention); msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
CHECKSUM_NONE : CHECKSUM_UNNECESSARY; @@ -1957,7 +1957,7 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, qos_ctl = rxcb->tid; - if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc)) + if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; /* TODO Add other QoS ctl fields when required */ @@ -2061,7 +2061,7 @@ static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, bool is_amsdu; is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); - hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); + hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); rfc1042 = hdr; if (rxcb->is_first_msdu) { @@ -2134,8 +2134,8 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, u8 *first_hdr; u8 decap; - first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); - decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); + first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); + decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); switch (decap) { case DP_RX_DECAP_TYPE_NATIVE_WIFI: @@ -2167,6 +2167,7 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, bool is_decrypted = false; struct ieee80211_hdr *hdr; struct ath11k_peer *peer; + struct rx_attention *rx_attention; u32 err_bitmap; hdr = (struct ieee80211_hdr *)msdu->data; @@ -2188,9 +2189,10 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, } spin_unlock_bh(&ar->ab->base_lock); - err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); + rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); + err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) - is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); + is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); /* Clear per-MPDU flags while leaving per-PPDU flags intact */ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | @@ -2215,7 +2217,7 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, RX_FLAG_PN_VALIDATED; } - ath11k_dp_rx_h_csum_offload(msdu); + ath11k_dp_rx_h_csum_offload(ar, msdu); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, enctype, rx_status, is_decrypted); @@ -2236,11 +2238,11 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, u8 sgi; bool is_cck; - pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); - bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); - rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); - nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); - sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); + pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); + bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); + rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); + nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); + sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); switch (pkt_type) { case RX_MSDU_START_PKT_TYPE_11A: @@ -2297,7 +2299,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { u8 channel_num; - u32 center_freq; + u32 center_freq, meta_data; struct ieee80211_channel *channel; rx_status->freq = 0; @@ -2308,8 +2310,9 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; - channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); - center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; + meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); + channel_num = 
meta_data; + center_freq = meta_data >> 16; if (center_freq >= 5935 && center_freq <= 7105) { rx_status->band = NL80211_BAND_6GHZ; @@ -2409,7 +2412,9 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { + struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc, *lrx_desc; + struct rx_attention *rx_attention; struct ieee80211_rx_status rx_status = {0}; struct ieee80211_rx_status *status; struct ath11k_skb_rxcb *rxcb; @@ -2419,10 +2424,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, u8 *hdr_status; u16 msdu_len; int ret; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); if (!last_buf) { - ath11k_warn(ar->ab, + ath11k_warn(ab, "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); ret = -EIO; goto free_out; @@ -2430,38 +2436,39 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, rx_desc = (struct hal_rx_desc *)msdu->data; lrx_desc = (struct hal_rx_desc *)last_buf->data; - if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { - ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); + rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); + if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { + ath11k_warn(ab, "msdu_done bit in attention is not set\n"); ret = -EIO; goto free_out; } rxcb = ATH11K_SKB_RXCB(msdu); rxcb->rx_desc = rx_desc; - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); - l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); + l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); if (rxcb->is_frag) { - skb_pull(msdu, HAL_RX_DESC_SIZE); + skb_pull(msdu, hal_rx_desc_sz); } else if (!rxcb->is_continuation) { - if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { - hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); ret = -EINVAL; - ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); - ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, + ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); + ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); - ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, + ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, sizeof(struct hal_rx_desc)); goto free_out; } - skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); - skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); + skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); } else { ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, msdu, last_buf, l3_pad_bytes, msdu_len); if (ret) { - ath11k_warn(ar->ab, + ath11k_warn(ab, "failed to coalesce msdu rx buffer%d\n", ret); goto free_out; } @@ -3090,16 +3097,17 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer u8 mic[IEEE80211_CCMP_MIC_LEN]; int head_len, tail_len, ret; size_t data_len; - u32 hdr_len; + u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; u8 *key, *data; u8 key_idx; - if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) + if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != + HAL_ENCRYPT_TYPE_TKIP_MIC) return 0; - hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); hdr_len = ieee80211_hdrlen(hdr->frame_control); - head_len = hdr_len + 
HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN; + head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; if (!is_multicast_ether_addr(hdr->addr1)) @@ -3125,7 +3133,7 @@ mic_fail: rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; - skb_pull(msdu, HAL_RX_DESC_SIZE); + skb_pull(msdu, hal_rx_desc_sz); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, @@ -3140,11 +3148,12 @@ static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; if (!flags) return; - hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); if (flags & RX_FLAG_MIC_STRIPPED) skb_trim(msdu, msdu->len - @@ -3158,8 +3167,8 @@ static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); - memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len, - (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len); + memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, + (void *)msdu->data + hal_rx_desc_sz, hdr_len); skb_pull(msdu, crypto_len); } } @@ -3172,11 +3181,12 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, struct hal_rx_desc *rx_desc; struct sk_buff *skb, *first_frag, *last_frag; struct ieee80211_hdr *hdr; + struct rx_attention *rx_attention; enum hal_encrypt_type enctype; bool is_decrypted = false; int msdu_len = 0; int extra_space; - u32 flags; + u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; first_frag = skb_peek(&rx_tid->rx_frags); last_frag = skb_peek_tail(&rx_tid->rx_frags); @@ -3184,11 +3194,13 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, skb_queue_walk(&rx_tid->rx_frags, skb) { flags = 0; rx_desc = (struct hal_rx_desc *)skb->data; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); - enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); - if (enctype != HAL_ENCRYPT_TYPE_OPEN) - is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); + enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); + if (enctype != HAL_ENCRYPT_TYPE_OPEN) { + rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); + is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); + } if (is_decrypted) { if (skb != first_frag) @@ -3204,7 +3216,7 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); if (skb != first_frag) - skb_pull(skb, HAL_RX_DESC_SIZE + + skb_pull(skb, hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control)); msdu_len += skb->len; } @@ -3220,7 +3232,7 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, dev_kfree_skb_any(skb); } - hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); ATH11K_SKB_RXCB(first_frag)->is_frag = 1; @@ -3246,10 +3258,10 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti struct hal_srng *srng; dma_addr_t paddr; u32 desc_bank, msdu_info, mpdu_info; - u32 dst_idx, cookie; - u32 *msdu_len_offset; + u32 dst_idx, cookie, hal_rx_desc_sz; int ret, buf_id; + hal_rx_desc_sz = ab->hw_params.hal_desc_sz; 
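 /* The rx descriptor length is per-target now (hw_params.hal_desc_sz is
  * sizeof(struct hal_rx_desc_ipq8074) or sizeof(struct hal_rx_desc_qcn9074)),
  * so it is read at runtime instead of the old fixed HAL_RX_DESC_SIZE. */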
link_desc_banks = ab->dp.link_desc_banks; reo_dest_ring = rx_tid->dst_ring_desc; @@ -3264,16 +3276,14 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, - defrag_skb->len - HAL_RX_DESC_SIZE) | + defrag_skb->len - hal_rx_desc_sz) | FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); msdu0->rx_msdu_info.info0 = msdu_info; /* change msdu len in hal rx desc */ - msdu_len_offset = (u32 *)&rx_desc->msdu_start; - *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH); - *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE; + ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), @@ -3346,24 +3356,26 @@ err_unmap_dma: return ret; } -static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b) +static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, + struct sk_buff *a, struct sk_buff *b) { int frag1, frag2; - frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a); - frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b); + frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); + frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); return frag1 - frag2; } -static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, +static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, + struct sk_buff_head *frag_list, struct sk_buff *cur_frag) { struct sk_buff *skb; int cmp; skb_queue_walk(frag_list, skb) { - cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag); + cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); if (cmp < 0) continue; __skb_queue_before(frag_list, skb, cur_frag); @@ -3372,14 +3384,15 @@ static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, __skb_queue_tail(frag_list, cur_frag); } -static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb) +static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr; u64 pn = 0; u8 *ehdr; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); - ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control); + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); + ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); pn = ehdr[0]; pn |= (u64)ehdr[1] << 8; @@ -3403,19 +3416,19 @@ ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_t first_frag = skb_peek(&rx_tid->rx_frags); desc = (struct hal_rx_desc *)first_frag->data; - encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc); + encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) return true; - last_pn = ath11k_dp_rx_h_get_pn(first_frag); + last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); skb_queue_walk(&rx_tid->rx_frags, skb) { if (skb == first_frag) continue; - cur_pn = ath11k_dp_rx_h_get_pn(skb); + cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); if (cur_pn != last_pn + 1) return false; last_pn = cur_pn; @@ -3439,14 +3452,14 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, bool more_frags; rx_desc = (struct hal_rx_desc *)msdu->data; - peer_id = 
ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); - tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); - seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); - frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); - more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); - - if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || - !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || + peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); + tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); + seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); + frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); + more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); + + if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || + !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || tid > IEEE80211_NUM_TIDS) return -EINVAL; @@ -3484,7 +3497,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, if (frag_no > __fls(rx_tid->rx_frag_bitmap)) __skb_queue_tail(&rx_tid->rx_frags, msdu); else - ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); + ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); rx_tid->rx_frag_bitmap |= BIT(frag_no); if (!more_frags) @@ -3551,6 +3564,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool struct hal_rx_desc *rx_desc; u8 *hdr_status; u16 msdu_len; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); @@ -3586,9 +3600,9 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool } rx_desc = (struct hal_rx_desc *)msdu->data; - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); - if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { - hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc); + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); @@ -3598,7 +3612,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool goto exit; } - skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); + skb_put(msdu, hal_rx_desc_sz + msdu_len); if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { dev_kfree_skb_any(msdu); @@ -3732,7 +3746,7 @@ static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, int n_buffs; n_buffs = DIV_ROUND_UP(msdu_len, - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); + (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz)); skb_queue_walk_safe(msdu_list, skb, tmp) { rxcb = ATH11K_SKB_RXCB(skb); @@ -3753,19 +3767,22 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, { u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + struct rx_attention *rx_attention; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); - if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) { + if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct it's length */ - msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); + msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 
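The null-queue descriptor handling in these hunks spreads an MSDU that does not fit into one rx buffer across several buffers, each of which still begins with the hal descriptor, which is why the payload capacity per buffer is DP_RX_BUFFER_SIZE minus the descriptor size. A small standalone sketch of that arithmetic, with placeholder values standing in for the driver's actual buffer and descriptor sizes:

#include <stdio.h>

#define DP_RX_BUFFER_SIZE 2048			/* placeholder */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hal_rx_desc_sz = 320;	/* hypothetical per-chip size */
	unsigned int payload_per_buf = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
	unsigned int msdu_len = 5000;		/* an MSDU too big for one buffer */

	/* Each rx buffer carries a descriptor plus payload_per_buf bytes of the
	 * MSDU, so an oversized MSDU spans this many buffers: */
	unsigned int n_buffs = DIV_ROUND_UP(msdu_len, payload_per_buf);

	printf("msdu of %u bytes spans %u rx buffers\n", msdu_len, n_buffs);
	return 0;
}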
ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); return -EINVAL; } - if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { + rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc); + if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { ath11k_warn(ar->ab, "msdu_done bit not set in null_q_des processing\n"); __skb_queue_purge(msdu_list); @@ -3781,25 +3798,25 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, * This error can show up both in a REO destination or WBM release ring. */ - rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); - rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); if (rxcb->is_frag) { - skb_pull(msdu, HAL_RX_DESC_SIZE); + skb_pull(msdu, hal_rx_desc_sz); } else { - l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); + l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); - if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) + if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) return -EINVAL; - skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); - skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); } ath11k_dp_rx_h_ppdu(ar, desc, status); ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); - rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc); + rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); /* Please note that caller will having the access to msdu and completing * rx with mac80211. Need not worry about cleaning up amsdu_list. @@ -3846,14 +3863,15 @@ static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; - rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); - rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); - l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); - skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); - skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); + l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); ath11k_dp_rx_h_ppdu(ar, desc, status); @@ -4595,10 +4613,10 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, rx_desc = (struct hal_rx_desc *)msdu->data; rx_pkt_offset = sizeof(struct hal_rx_desc); - l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); if (is_first_msdu) { - if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { + if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; @@ -4607,7 +4625,7 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, } msdu_ppdu_id = - ath11k_dp_rxdesc_get_ppduid(rx_desc); + ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, ppdu_id, @@ -4676,12 +4694,13 @@ next_msdu: return rx_bufs_used; } -static void 
ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) +static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) { u32 rx_pkt_offset, l2_hdr_offset; - rx_pkt_offset = sizeof(struct hal_rx_desc); - l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); + rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, + (struct hal_rx_desc *)msdu->data); skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); } @@ -4691,12 +4710,14 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, struct sk_buff *last_msdu, struct ieee80211_rx_status *rxs) { + struct ath11k_base *ab = ar->ab; struct sk_buff *msdu, *mpdu_buf, *prev_buf; - u32 decap_format, wifi_hdr_len; + u32 wifi_hdr_len; struct hal_rx_desc *rx_desc; char *hdr_desc; - u8 *dest; + u8 *dest, decap_format; struct ieee80211_hdr_3addr *wh; + struct rx_attention *rx_attention; mpdu_buf = NULL; @@ -4704,22 +4725,23 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; + rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); - if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) + if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) return NULL; - decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); + decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { - ath11k_dp_rx_msdus_set_payload(head_msdu); + ath11k_dp_rx_msdus_set_payload(ar, head_msdu); prev_buf = head_msdu; msdu = head_msdu->next; while (msdu) { - ath11k_dp_rx_msdus_set_payload(msdu); + ath11k_dp_rx_msdus_set_payload(ar, msdu); prev_buf = msdu; msdu = msdu->next; @@ -4733,7 +4755,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u8 qos_pkt = 0; rx_desc = (struct hal_rx_desc *)head_msdu->data; - hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); + hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); /* Base size */ wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); @@ -4750,7 +4772,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, while (msdu) { rx_desc = (struct hal_rx_desc *)msdu->data; - hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); + hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); @@ -4760,7 +4782,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, memcpy(dest + wifi_hdr_len, (u8 *)&qos_field, sizeof(__le16)); } - ath11k_dp_rx_msdus_set_payload(msdu); + ath11k_dp_rx_msdus_set_payload(ar, msdu); prev_buf = msdu; msdu = msdu->next; } @@ -4768,11 +4790,11 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, if (!dest) goto err_merge_fail; - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + ath11k_dbg(ab, ATH11K_DBG_DATA, "mpdu_buf %pK mpdu_buf->len %u", prev_buf, prev_buf->len); } else { - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + ath11k_dbg(ab, ATH11K_DBG_DATA, "decap format %d is not supported!\n", decap_format); goto err_merge_fail; @@ -4782,7 +4804,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, err_merge_fail: if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + ath11k_dbg(ab, ATH11K_DBG_DATA, "err_merge_fail mpdu_buf %pK", mpdu_buf); /* Free the head buffer */ dev_kfree_skb_any(mpdu_buf); diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 1a0b9be9ce6a..8bba5234f81f 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -178,7 +178,7 @@ tcl_ring_sel: } 
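The TCL and rx descriptor hunks below manipulate packed descriptor words through GENMASK()/FIELD_PREP()/FIELD_GET(). The following is a simplified, self-contained re-creation of those helpers, only to show how a flag such as the mesh-enable bit and a small field such as the TID are packed into one 32-bit word; the real macros live in include/linux/bitfield.h and include/linux/bits.h, and the bit positions below mirror the HAL_TCL_DATA_CMD_INFO2 definitions shown later in this patch.

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, r)  (((r) & (mask)) >> __builtin_ctz(mask))

#define INFO2_MESH_ENABLE  (1u << 20)		/* single-bit flag */
#define INFO2_TID          GENMASK(25, 22)	/* 4-bit field */

int main(void)
{
	uint32_t info2 = 0;

	info2 |= FIELD_PREP(INFO2_MESH_ENABLE, 1);
	info2 |= FIELD_PREP(INFO2_TID, 5);

	printf("info2 = 0x%08x, tid = %u\n",
	       info2, FIELD_GET(INFO2_TID, info2));
	return 0;
}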
if (ieee80211_vif_is_mesh(arvif->vif)) - ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1); + ti.enable_mesh = true; ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1); @@ -792,8 +792,8 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >> HAL_ADDR_MSB_REG_SHIFT; - cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff; - cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff; + cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr); + cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr); cmd->msi_data = params.msi_data; cmd->intr_info = FIELD_PREP( diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 9904c0eb7587..08e3c72d9237 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -89,17 +89,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + - HAL_CE_DST_RING_BASE_LSB), - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP, - }, - .reg_size = { - (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG), - (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG), - }, .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST */ @@ -108,17 +97,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_CE0_DST_REG + - HAL_CE_DST_RING_BASE_LSB), - HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP, - }, - .reg_size = { - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - }, .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST_STATUS */ @@ -127,18 +105,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_CE0_DST_REG + - HAL_CE_DST_STATUS_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_CE0_DST_REG + - HAL_CE_DST_STATUS_RING_HP), - }, - .reg_size = { - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - }, .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* WBM_IDLE_LINK */ @@ -147,11 +113,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_IDLE_LINK_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP), - }, .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, }, { /* SW2WBM_RELEASE */ @@ -160,11 +121,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_RELEASE_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP), - }, .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* WBM2SW_RELEASE */ @@ -173,16 +129,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = 
sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM0_RELEASE_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP), - }, - .reg_size = { - (HAL_WBM1_RELEASE_RING_BASE_LSB - - HAL_WBM0_RELEASE_RING_BASE_LSB), - (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP), - }, .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* RXDMA_BUF */ @@ -955,7 +901,7 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, /* Enable the SRNG */ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40); + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40); } int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, @@ -1234,6 +1180,46 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; + s = &hal->srng_config[HAL_CE_SRC]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); + + s = &hal->srng_config[HAL_CE_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + + s = &hal->srng_config[HAL_CE_DST_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + + HAL_CE_DST_STATUS_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + + s = &hal->srng_config[HAL_WBM_IDLE_LINK]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; + + s = &hal->srng_config[HAL_SW2WBM_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_WBM2SW_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; + return 0; } diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 1f1b29cd0aa3..91d1428b8b94 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -39,14 +39,22 @@ struct ath11k_base; #define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x))) /* WCSS Relative address */ +#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 #define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 -#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000 -#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 
0x00a01000 -#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000 -#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000 +#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg) +#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg) +#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg) +#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg) #define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 +#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 +#define HAL_WLAON_REG_BASE 0x01f80000 + /* SW2TCL(x) R0 ring configuration address */ #define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014 #define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c @@ -197,8 +205,10 @@ struct ath11k_base; #define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp /* WBM Idle R0 address */ -#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860 -#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870 +#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm_idle_link_ring_base_lsb) +#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(x) \ + (ab->hw_params.regs->hal_wbm_idle_link_ring_misc) #define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048 #define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c #define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058 @@ -213,14 +223,17 @@ struct ath11k_base; #define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0 /* SW2WBM R0 release address */ -#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8 +#define HAL_WBM_RELEASE_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm_release_ring_base_lsb) /* SW2WBM R2 release address */ #define HAL_WBM_RELEASE_RING_HP 0x00003018 /* WBM2SW R0 release address */ -#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910 -#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968 +#define HAL_WBM0_RELEASE_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm0_release_ring_base_lsb) +#define HAL_WBM1_RELEASE_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm1_release_ring_base_lsb) /* WBM2SW R2 release address */ #define HAL_WBM0_RELEASE_RING_HP 0x000030c0 @@ -303,8 +316,6 @@ struct ath11k_base; #define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff -#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc)) - /* Add any other errors here and return them in * ath11k_hal_rx_desc_get_err(). 
*/ diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h index 1b713cb13b90..d54ec6aa6281 100644 --- a/drivers/net/wireless/ath/ath11k/hal_desc.h +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h @@ -949,16 +949,17 @@ struct hal_reo_flush_cache { #define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21) #define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23) -#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0) -#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19) -#define HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20) -#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21) -#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22) -#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26) +#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0) +#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19) +#define HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20) +#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21) +#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22) +#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26) #define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0) #define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6) #define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26) +#define HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE GENMASK(31, 30) #define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20) #define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28) diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c index 569e790d83a1..c8929de8ce6c 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.c +++ b/drivers/net/wireless/ath/ath11k/hal_tx.c @@ -75,6 +75,9 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd, FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash); tcl_cmd->info4 = 0; + + if (ti->enable_mesh) + ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd); } void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id) diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h index c291e59c3ca6..36f4f6f6cbc2 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.h +++ b/drivers/net/wireless/ath/ath11k/hal_tx.h @@ -34,6 +34,7 @@ struct hal_tx_info { u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */ u8 lmac_id; u8 dscp_tid_tbl_idx; + bool enable_mesh; }; /* TODO: Check if the actual desc macros can be used instead */ diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index 6285c52afc44..e9366f786fbb 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -28,6 +28,7 @@ struct ath11k_hif_ops { u32 *msi_addr_hi); void (*ce_irq_enable)(struct ath11k_base *ab); void (*ce_irq_disable)(struct ath11k_base *ab); + void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); }; static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab) @@ -124,4 +125,13 @@ static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi); } + +static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, + u32 *msi_data_idx) +{ + if (ab->hif.ops->get_ce_msi_idx) + ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx); + else + *msi_data_idx = ce_id; +} #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 66331da35012..377ae8d5b58f 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ 
b/drivers/net/wireless/ath/ath11k/hw.c @@ -31,6 +31,20 @@ static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx) return pdev_idx; } +static void ath11k_hw_ipq8074_tx_mesh_enable(struct ath11k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd) +{ + tcl_cmd->info2 |= FIELD_PREP(HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE, + true); +} + +static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd) +{ + tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE, + true); +} + static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab, struct target_resource_config *config) { @@ -155,11 +169,358 @@ static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, return mac_id; } +static bool ath11k_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, + __le32_to_cpu(desc->u.ipq8074.msdu_end.info2)); +} + +static bool ath11k_hw_ipq8074_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, + __le32_to_cpu(desc->u.ipq8074.msdu_end.info2)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, + __le32_to_cpu(desc->u.ipq8074.msdu_end.info2)); +} + +static u8 *ath11k_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc) +{ + return desc->u.ipq8074.hdr_status; +} + +static bool ath11k_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) & + RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID; +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info2)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info2)); +} + +static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1)); +} + +static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1)); +} + +static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1)); +} + +static u16 ath11k_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info1)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_SGI, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, + 
__le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.ipq8074.msdu_start.phy_meta_data); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO2_TID, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2)); +} + +static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.ipq8074.mpdu_start.sw_peer_id); +} + +static void ath11k_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy((u8 *)&fdesc->u.ipq8074.msdu_end, (u8 *)&ldesc->u.ipq8074.msdu_end, + sizeof(struct rx_msdu_end_ipq8074)); + memcpy((u8 *)&fdesc->u.ipq8074.attention, (u8 *)&ldesc->u.ipq8074.attention, + sizeof(struct rx_attention)); + memcpy((u8 *)&fdesc->u.ipq8074.mpdu_end, (u8 *)&ldesc->u.ipq8074.mpdu_end, + sizeof(struct rx_mpdu_end)); +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) +{ + return FIELD_GET(HAL_TLV_HDR_TAG, + __le32_to_cpu(desc->u.ipq8074.mpdu_start_tag)); +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.ipq8074.mpdu_start.phy_ppdu_id); +} + +static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.ipq8074.msdu_start.info1); + + info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH; + info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len); + + desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info); +} + +static +struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc) +{ + return &desc->u.ipq8074.attention; +} + +static u8 *ath11k_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.ipq8074.msdu_payload[0]; +} + +static bool ath11k_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU, + __le16_to_cpu(desc->u.qcn9074.msdu_end.info4)); +} + +static bool ath11k_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU, + __le16_to_cpu(desc->u.qcn9074.msdu_end.info4)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING, + __le16_to_cpu(desc->u.qcn9074.msdu_end.info4)); +} + +static u8 *ath11k_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc) +{ + return desc->u.qcn9074.hdr_status; +} + +static bool ath11k_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) & + RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID; +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info2)); 
+} + +static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info2)); +} + +static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11)); +} + +static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11)); +} + +static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11)); +} + +static u16 ath11k_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info1)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_SGI, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9074.msdu_start.phy_meta_data); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO9_TID, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9)); +} + +static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9074.mpdu_start.sw_peer_id); +} + +static void ath11k_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy((u8 *)&fdesc->u.qcn9074.msdu_end, (u8 *)&ldesc->u.qcn9074.msdu_end, + sizeof(struct rx_msdu_end_qcn9074)); + memcpy((u8 *)&fdesc->u.qcn9074.attention, (u8 *)&ldesc->u.qcn9074.attention, + sizeof(struct rx_attention)); + memcpy((u8 *)&fdesc->u.qcn9074.mpdu_end, (u8 *)&ldesc->u.qcn9074.mpdu_end, + sizeof(struct rx_mpdu_end)); +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) +{ + return FIELD_GET(HAL_TLV_HDR_TAG, + __le32_to_cpu(desc->u.qcn9074.mpdu_start_tag)); +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9074.mpdu_start.phy_ppdu_id); +} + +static void ath11k_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.qcn9074.msdu_start.info1); + + info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH; + info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len); + + desc->u.qcn9074.msdu_start.info1 = __cpu_to_le32(info); +} + +static +struct 
rx_attention *ath11k_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc) +{ + return &desc->u.qcn9074.attention; +} + +static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.qcn9074.msdu_payload[0]; +} + const struct ath11k_hw_ops ipq8074_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, .wmi_init_config = ath11k_init_wmi_config_ipq8074, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, }; const struct ath11k_hw_ops ipq6018_ops = { @@ -167,6 +528,33 @@ const struct ath11k_hw_ops ipq6018_ops = { .wmi_init_config = ath11k_init_wmi_config_ipq8074, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = 
ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, }; const struct ath11k_hw_ops qca6390_ops = { @@ -174,6 +562,67 @@ const struct ath11k_hw_ops qca6390_ops = { .wmi_init_config = ath11k_init_wmi_config_qca6390, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, + .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, +}; + +const struct ath11k_hw_ops qcn9074_ops = { + .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id, + .wmi_init_config = ath11k_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = 
ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload, }; #define ATH11K_TX_RING_MASK_0 0x1 @@ -792,6 +1241,241 @@ const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = { }, }; +/* Target firmware's Copy Engine configuration. 
*/ +const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host Pktlog */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE8 target->host used only by IPA */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + /* CE 9, 10, 11 are used by MHI driver */ +}; + +/* Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. 
+ */ +const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = { + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(5), + }, + + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = { + .tx = { + ATH11K_TX_RING_MASK_0, + ATH11K_TX_RING_MASK_1, + ATH11K_TX_RING_MASK_2, + }, + .rx_mon_status = { + 0, 0, 0, + ATH11K_RX_MON_STATUS_RING_MASK_0, + ATH11K_RX_MON_STATUS_RING_MASK_1, + ATH11K_RX_MON_STATUS_RING_MASK_2, + }, + .rx = { + 0, 0, 0, 0, + ATH11K_RX_RING_MASK_0, + ATH11K_RX_RING_MASK_1, + ATH11K_RX_RING_MASK_2, + ATH11K_RX_RING_MASK_3, + }, + .rx_err = { + 0, 0, 0, + ATH11K_RX_ERR_RING_MASK_0, + }, + .rx_wbm_rel = { + 0, 0, 0, + ATH11K_RX_WBM_REL_RING_MASK_0, + }, + .reo_status = { + 0, 0, 0, + ATH11K_REO_STATUS_RING_MASK_0, + }, + .rxdma2host = { + 0, 0, 0, + ATH11K_RXDMA2HOST_RING_MASK_0, + }, + .host2rxdma = { + 0, 0, 0, + ATH11K_HOST2RXDMA_RING_MASK_0, + }, +}; + const struct ath11k_hw_regs ipq8074_regs = { /* SW2TCL(x) R0 ring configuration address */ .hal_tcl1_ring_base_lsb = 0x00000510, @@ -841,6 +1525,26 @@ const struct ath11k_hw_regs ipq8074_regs = { .hal_reo_status_ring_base_lsb = 
0x00000504, .hal_reo_status_hp = 0x00003070, + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000, + .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000860, + .hal_wbm_idle_link_ring_misc = 0x00000870, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001d8, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000910, + .hal_wbm1_release_ring_base_lsb = 0x00000968, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x0, + .pcie_pcs_osc_dtct_config_base = 0x0, }; const struct ath11k_hw_regs qca6390_regs = { @@ -891,4 +1595,96 @@ const struct ath11k_hw_regs qca6390_regs = { /* REO status address */ .hal_reo_status_ring_base_lsb = 0x000004ac, .hal_reo_status_hp = 0x00003068, + + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000, + .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000860, + .hal_wbm_idle_link_ring_misc = 0x00000870, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001d8, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000910, + .hal_wbm1_release_ring_base_lsb = 0x00000968, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac, + .pcie_pcs_osc_dtct_config_base = 0x01e0c628, +}; + +const struct ath11k_hw_regs qcn9074_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_base_lsb = 0x000004f0, + .hal_tcl1_ring_base_msb = 0x000004f4, + .hal_tcl1_ring_id = 0x000004f8, + .hal_tcl1_ring_misc = 0x00000500, + .hal_tcl1_ring_tp_addr_lsb = 0x0000050c, + .hal_tcl1_ring_tp_addr_msb = 0x00000510, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524, + .hal_tcl1_ring_msi1_base_lsb = 0x00000538, + .hal_tcl1_ring_msi1_base_msb = 0x0000053c, + .hal_tcl1_ring_msi1_data = 0x00000540, + .hal_tcl2_ring_base_lsb = 0x00000548, + .hal_tcl_ring_base_lsb = 0x000005f8, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x00000700, + + /* REO2SW(x) R0 ring configuration address */ + .hal_reo1_ring_base_lsb = 0x0000029c, + .hal_reo1_ring_base_msb = 0x000002a0, + .hal_reo1_ring_id = 0x000002a4, + .hal_reo1_ring_misc = 0x000002ac, + .hal_reo1_ring_hp_addr_lsb = 0x000002b0, + .hal_reo1_ring_hp_addr_msb = 0x000002b4, + .hal_reo1_ring_producer_int_setup = 0x000002c0, + .hal_reo1_ring_msi1_base_lsb = 0x000002e4, + .hal_reo1_ring_msi1_base_msb = 0x000002e8, + .hal_reo1_ring_msi1_data = 0x000002ec, + .hal_reo2_ring_base_lsb = 0x000002f4, + .hal_reo1_aging_thresh_ix_0 = 0x00000564, + .hal_reo1_aging_thresh_ix_1 = 0x00000568, + .hal_reo1_aging_thresh_ix_2 = 0x0000056c, + .hal_reo1_aging_thresh_ix_3 = 0x00000570, + + /* REO2SW(x) R2 ring pointers (head/tail) address */ + .hal_reo1_ring_hp = 0x00003038, + .hal_reo1_ring_tp = 0x0000303c, + .hal_reo2_ring_hp = 0x00003040, + + /* REO2TCL R0 ring configuration address */ + .hal_reo_tcl_ring_base_lsb = 0x000003fc, + .hal_reo_tcl_ring_hp = 0x00003058, + + /* REO status address */ + .hal_reo_status_ring_base_lsb = 0x00000504, + .hal_reo_status_hp = 0x00003070, + + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000, + 
.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000874, + .hal_wbm_idle_link_ring_misc = 0x00000884, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001ec, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000924, + .hal_wbm1_release_ring_base_lsb = 0x0000097c, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0f45c, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 8af0034fdb05..c81a6328361d 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -105,6 +105,9 @@ enum ath11k_bus { #define ATH11K_EXT_IRQ_GRP_NUM_MAX 11 +struct hal_rx_desc; +struct hal_tcl_data_cmd; + struct ath11k_hw_ring_mask { u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX]; u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX]; @@ -134,6 +137,7 @@ struct ath11k_hw_params { bool internal_sleep_clock; const struct ath11k_hw_regs *regs; + u32 qmi_service_ins_id; const struct ce_attr *host_ce_config; u32 ce_count; const struct ce_pipe_config *target_ce_config; @@ -157,6 +161,7 @@ struct ath11k_hw_params { bool idle_ps; bool cold_boot_calib; bool supports_suspend; + u32 hal_desc_sz; }; struct ath11k_hw_ops { @@ -165,14 +170,45 @@ struct ath11k_hw_ops { struct target_resource_config *config); int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id); int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id); + void (*tx_mesh_enable)(struct ath11k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd); + bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc); + bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc); + u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc); + bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc); + bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc); + bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc); + void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc); + u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc); + void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len); + struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc); + u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc); }; extern const struct ath11k_hw_ops ipq8074_ops; extern const struct ath11k_hw_ops ipq6018_ops; extern const struct ath11k_hw_ops qca6390_ops; +extern const struct ath11k_hw_ops qcn9074_ops; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074; 
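The ath11k_hw_ops additions above let common code read rx descriptor fields without knowing whether the chip uses the ipq8074 or the qcn9074 layout: each chip registers its own accessor table and callers dispatch through ab->hw_params.hw_ops. A stripped-down sketch of that ops-table pattern with stand-in types (not the driver's real structures):

#include <stdio.h>
#include <stdint.h>

struct fake_rx_desc {
	uint16_t msdu_len;	/* placeholder layout */
};

struct fake_hw_ops {
	uint16_t (*rx_desc_get_msdu_len)(struct fake_rx_desc *desc);
};

static uint16_t chip_a_get_msdu_len(struct fake_rx_desc *desc)
{
	/* a real chip-specific accessor would extract a bitfield here */
	return desc->msdu_len;
}

static const struct fake_hw_ops chip_a_ops = {
	.rx_desc_get_msdu_len = chip_a_get_msdu_len,
};

int main(void)
{
	const struct fake_hw_ops *ops = &chip_a_ops;	/* chosen per device id */
	struct fake_rx_desc desc = { .msdu_len = 1500 };

	printf("msdu len = %u\n", ops->rx_desc_get_msdu_len(&desc));
	return 0;
}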
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390; +extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074; static inline int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw, @@ -261,9 +297,26 @@ struct ath11k_hw_regs { u32 hal_reo_status_ring_base_lsb; u32 hal_reo_status_hp; + + u32 hal_seq_wcss_umac_ce0_src_reg; + u32 hal_seq_wcss_umac_ce0_dst_reg; + u32 hal_seq_wcss_umac_ce1_src_reg; + u32 hal_seq_wcss_umac_ce1_dst_reg; + + u32 hal_wbm_idle_link_ring_base_lsb; + u32 hal_wbm_idle_link_ring_misc; + + u32 hal_wbm_release_ring_base_lsb; + + u32 hal_wbm0_release_ring_base_lsb; + u32 hal_wbm1_release_ring_base_lsb; + + u32 pcie_qserdes_sysclk_en_sel; + u32 pcie_pcs_osc_dtct_config_base; }; extern const struct ath11k_hw_regs ipq8074_regs; extern const struct ath11k_hw_regs qca6390_regs; +extern const struct ath11k_hw_regs qcn9074_regs; #endif diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index faa2e678e63e..4df425dd31a2 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -829,6 +829,75 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif, ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } +static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) + return; + + cancel_delayed_work(&arvif->connection_loss_work); +} + +void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath11k_mac_handle_beacon_iter, + skb); +} + +static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + u32 *vdev_id = data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + + if (arvif->vdev_id != *vdev_id) + return; + + if (!arvif->is_up) + return; + + ieee80211_beacon_loss(vif); + + /* Firmware doesn't report beacon loss events repeatedly. If AP probe + * (done by mac80211) succeeds but beacons do not resume then it + * doesn't make sense to continue operation. Queue connection loss work + * which can be cancelled when beacon is received. + */ + ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, + ATH11K_CONNECTION_LOSS_HZ); +} + +void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath11k_mac_handle_beacon_miss_iter, + &vdev_id); +} + +static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work) +{ + struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->vif; + + if (!arvif->is_up) + return; + + ieee80211_connection_loss(vif); +} + static void ath11k_peer_assoc_h_basic(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1265,9 +1334,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu * length. 
*/ - ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >> - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT; + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); if (ampdu_factor) { if (sta->vht_cap.vht_supported) @@ -1760,7 +1828,7 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw, arvif->is_up = false; - /* TODO: cancel connection_loss_work */ + cancel_delayed_work_sync(&arvif->connection_loss_work); } static u32 ath11k_mac_get_rate_hw_value(int bitrate) @@ -3807,7 +3875,7 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) IEEE80211_HE_MAC_CAP4_BQR; he_cap_elem->mac_cap_info[4] &= ~m; - m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION | + m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION | IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; @@ -3817,7 +3885,7 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; he_cap_elem->phy_cap_info[2] &= ~m; - m = IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA | + m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK; he_cap_elem->phy_cap_info[3] &= ~m; @@ -3829,13 +3897,13 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) he_cap_elem->phy_cap_info[5] &= ~m; m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | - IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; he_cap_elem->phy_cap_info[6] &= ~m; - m = IEEE80211_HE_PHY_CAP7_SRP_BASED_SR | - IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR | + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; he_cap_elem->phy_cap_info[7] &= ~m; @@ -3919,8 +3987,6 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar, he_cap_elem->phy_cap_info[5] &= ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; - he_cap_elem->phy_cap_info[5] &= - ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; switch (i) { @@ -4213,7 +4279,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, return -ENOSPC; } - if (skb_queue_len(q) == ATH11K_TX_MGMT_NUM_PENDING_MAX) { + if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) { ath11k_warn(ar->ab, "mgmt tx queue is full\n"); return -ENOSPC; } @@ -4617,10 +4683,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, arvif->vif = vif; INIT_LIST_HEAD(&arvif->list); - - /* Should we initialize any worker to handle connection loss indication - * from firmware in sta mode? 
- */ + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath11k_mac_vif_sta_connection_loss_work); for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { arvif->bitrate_mask.control[i].legacy = 0xffffffff; @@ -4829,6 +4893,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, int ret; int i; + cancel_delayed_work_sync(&arvif->connection_loss_work); + mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n", @@ -5096,13 +5162,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, arg.channel.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.channel.freq2_radar = + !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.channel.passive = arg.channel.chan_radar; spin_lock_bh(&ab->base_lock); arg.regdomain = ar->ab->dfs_region; spin_unlock_bh(&ab->base_lock); - /* TODO: Notify if secondary 80Mhz also needs radar detection */ if (he_support) { ret = ath11k_set_he_mu_sounding_mode(ar, arvif); if (ret) { @@ -6082,6 +6150,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, /* TODO: Use real NF instead of default one. */ sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } static const struct ieee80211_ops ath11k_ops = { diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 455577905505..4bc59bdaf244 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -150,4 +150,6 @@ int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); u8 ath11k_mac_bw_to_mac80211_bw(u8 bw); enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw); enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher); +void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); +void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); #endif diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 09858e516903..27b394d115e2 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -7,10 +7,11 @@ #include "core.h" #include "debug.h" #include "mhi.h" +#include "pci.h" #define MHI_TIMEOUT_DEFAULT_MS 90000 -static struct mhi_channel_config ath11k_mhi_channels[] = { +static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = { { .num = 0, .name = "LOOPBACK", @@ -69,7 +70,7 @@ static struct mhi_channel_config ath11k_mhi_channels[] = { }, }; -static struct mhi_event_config ath11k_mhi_events[] = { +static struct mhi_event_config ath11k_mhi_events_qca6390[] = { { .num_elements = 32, .irq_moderation_ms = 0, @@ -92,15 +93,108 @@ static struct mhi_event_config ath11k_mhi_events[] = { }, }; -static struct mhi_controller_config ath11k_mhi_config = { +static struct mhi_controller_config ath11k_mhi_config_qca6390 = { .max_channels = 128, .timeout_ms = 2000, .use_bounce_buf = false, .buf_len = 0, - .num_channels = ARRAY_SIZE(ath11k_mhi_channels), - .ch_cfg = ath11k_mhi_channels, - .num_events = ARRAY_SIZE(ath11k_mhi_events), - .event_cfg = ath11k_mhi_events, + .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390), + .ch_cfg = ath11k_mhi_channels_qca6390, + .num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390), + .event_cfg = ath11k_mhi_events_qca6390, +}; + +static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = { + { + .num = 0, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x14, + 
.pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 1, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 20, + .name = "IPCR", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 21, + .name = "IPCR", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = true, + }, +}; + +static struct mhi_event_config ath11k_mhi_events_qcn9074[] = { + { + .num_elements = 32, + .irq_moderation_ms = 0, + .irq = 1, + .data_type = MHI_ER_CTRL, + .mode = MHI_DB_BRST_DISABLE, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, + { + .num_elements = 256, + .irq_moderation_ms = 1, + .irq = 2, + .mode = MHI_DB_BRST_DISABLE, + .priority = 1, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, +}; + +static struct mhi_controller_config ath11k_mhi_config_qcn9074 = { + .max_channels = 30, + .timeout_ms = 10000, + .use_bounce_buf = false, + .buf_len = 0, + .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074), + .ch_cfg = ath11k_mhi_channels_qcn9074, + .num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074), + .event_cfg = ath11k_mhi_events_qcn9074, }; void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) @@ -221,6 +315,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; struct mhi_controller *mhi_ctrl; + struct mhi_controller_config *ath11k_mhi_config; int ret; mhi_ctrl = mhi_alloc_controller(); @@ -254,7 +349,21 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) mhi_ctrl->read_reg = ath11k_mhi_op_read_reg; mhi_ctrl->write_reg = ath11k_mhi_op_write_reg; - ret = mhi_register_controller(mhi_ctrl, &ath11k_mhi_config); + switch (ab->hw_rev) { + case ATH11K_HW_QCN9074_HW10: + ath11k_mhi_config = &ath11k_mhi_config_qcn9074; + break; + case ATH11K_HW_QCA6390_HW20: + ath11k_mhi_config = &ath11k_mhi_config_qca6390; + break; + default: + ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n", + ab->hw_rev); + mhi_free_controller(mhi_ctrl); + return -EINVAL; + } + + ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config); if (ret) { ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret); mhi_free_controller(mhi_ctrl); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d14416816acc..0f31eb566fb6 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -35,9 +35,11 @@ #define ACCESS_ALWAYS_OFF 0xFE0 #define QCA6390_DEVICE_ID 0x1101 +#define QCN9074_DEVICE_ID 0x1104 static const struct pci_device_id ath11k_pci_id_table[] = { { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) }, + /* TODO: add QCN9074_DEVICE_ID) once firmware issues are resolved */ {0} }; @@ -50,14 +52,25 @@ static const struct ath11k_bus_params ath11k_pci_bus_params = { .fixed_mem_region = false, }; -static const struct 
ath11k_msi_config msi_config = { - .total_vectors = 32, - .total_users = 4, - .users = (struct ath11k_msi_user[]) { - { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, - { .name = "CE", .num_vectors = 10, .base_vector = 3 }, - { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, - { .name = "DP", .num_vectors = 18, .base_vector = 14 }, +static const struct ath11k_msi_config ath11k_msi_config[] = { + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + }, + { + .total_vectors = 16, + .total_users = 3, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 5, .base_vector = 3 }, + { .name = "DP", .num_vectors = 8, .base_vector = 8 }, + }, }, }; @@ -131,9 +144,38 @@ static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offse } } +static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) +{ + u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); + u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); + u32 window; + + window = (umac_window << 12) | (ce_window << 6); + + iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); +} + +static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, + u32 offset) +{ + u32 window_start; + + /* If offset lies within DP register range, use 3rd window */ + if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) + window_start = 3 * WINDOW_START; + /* If offset lies within CE register range, use 2nd window */ + else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) + window_start = 2 * WINDOW_START; + else + window_start = WINDOW_START; + + return window_start; +} + void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 window_start; /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. @@ -145,10 +187,21 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) if (offset < WINDOW_START) { iowrite32(value, ab->mem + offset); } else { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pci_select_window(ab_pci, offset); - iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); + if (ab->bus_params.static_window_map) + window_start = ath11k_pci_get_window_start(ab, offset); + else + window_start = WINDOW_START; + + if (window_start == WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath11k_pci_select_window(ab_pci, offset); + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } } if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && @@ -159,7 +212,7 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - u32 val; + u32 val, window_start; /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. 
@@ -171,10 +224,21 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) if (offset < WINDOW_START) { val = ioread32(ab->mem + offset); } else { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pci_select_window(ab_pci, offset); - val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); + if (ab->bus_params.static_window_map) + window_start = ath11k_pci_get_window_start(ab, offset); + else + window_start = WINDOW_START; + + if (window_start == WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath11k_pci_select_window(ab_pci, offset); + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } } if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && @@ -271,7 +335,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) int ret; ret = ath11k_pci_set_link_reg(ab, - PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG, + PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab), PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL, PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK); if (ret) { @@ -280,27 +344,27 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) } ret = ath11k_pci_set_link_reg(ab, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab), + PCIE_PCS_OSC_DTCT_CONFIG1_VAL, + PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab), + PCIE_PCS_OSC_DTCT_CONFIG2_VAL, + PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config2: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab), + PCIE_PCS_OSC_DTCT_CONFIG4_VAL, + PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config4: %d\n", ret); return ret; @@ -406,14 +470,15 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam u32 *base_vector) { struct ath11k_base *ab = ab_pci->ab; + const struct ath11k_msi_config *msi_config = ab_pci->msi_config; int idx; - for (idx = 0; idx < msi_config.total_users; idx++) { - if (strcmp(user_name, msi_config.users[idx].name) == 0) { - *num_vectors = msi_config.users[idx].num_vectors; - *user_base_data = msi_config.users[idx].base_vector + for (idx = 0; idx < msi_config->total_users; idx++) { + if (strcmp(user_name, msi_config->users[idx].name) == 0) { + *num_vectors = msi_config->users[idx].num_vectors; + *user_base_data = msi_config->users[idx].base_vector + ab_pci->msi_ep_base_data; - *base_vector = msi_config.users[idx].base_vector; + *base_vector = msi_config->users[idx].base_vector; ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", user_name, *num_vectors, *user_base_data, @@ -428,6 +493,23 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam return -EINVAL; } +static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, + u32 *msi_idx) +{ + u32 i, msi_data_idx; + + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; 
i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + if (ce_id == i) + break; + + msi_data_idx++; + } + *msi_idx = msi_data_idx; +} + static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, u32 *base_vector) @@ -521,6 +603,9 @@ static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg) { struct ath11k_ce_pipe *ce_pipe = arg; + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; + ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); tasklet_schedule(&ce_pipe->intr_tq); @@ -615,6 +700,9 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); + /* last interrupt received for this group */ + irq_grp->timestamp = jiffies; + ath11k_pci_ext_grp_disable(irq_grp); napi_schedule(&irq_grp->napi); @@ -625,8 +713,9 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) { int i, j, ret, num_vectors = 0; - u32 user_base_data = 0, base_vector = 0; + u32 user_base_data = 0, base_vector = 0, base_idx; + base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", &num_vectors, &user_base_data, @@ -656,7 +745,7 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) } irq_grp->num_irq = num_irq; - irq_grp->irqs[0] = base_vector + i; + irq_grp->irqs[0] = base_idx + i; for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; @@ -667,6 +756,8 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_PCI, "irq:%d group:%d\n", irq, i); + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_pci_ext_interrupt_handler, IRQF_SHARED, "DP_EXT_IRQ", irq_grp); @@ -687,7 +778,7 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) { struct ath11k_ce_pipe *ce_pipe; u32 msi_data_start; - u32 msi_data_count; + u32 msi_data_count, msi_data_idx; u32 msi_irq_start; unsigned int msi_data; int irq, i, ret, irq_idx; @@ -699,14 +790,14 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) return ret; /* Configure CE irqs */ - for (i = 0; i < ab->hw_params.ce_count; i++) { - msi_data = (i % msi_data_count) + msi_irq_start; - irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); - ce_pipe = &ab->ce.ce_pipe[i]; - + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; + msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; + irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); + ce_pipe = &ab->ce.ce_pipe[i]; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet); @@ -721,6 +812,8 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) } ab->irq_num[irq_idx] = irq; + msi_data_idx++; + ath11k_pci_ce_irq_disable(ab, i); } @@ -740,7 +833,7 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; - ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390; + ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2, &cfg->shadow_reg_v2_len); @@ -760,17 +853,18 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) { struct 
ath11k_base *ab = ab_pci->ab; + const struct ath11k_msi_config *msi_config = ab_pci->msi_config; struct msi_desc *msi_desc; int num_vectors; int ret; num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, - msi_config.total_vectors, - msi_config.total_vectors, + msi_config->total_vectors, + msi_config->total_vectors, PCI_IRQ_MSI); - if (num_vectors != msi_config.total_vectors) { + if (num_vectors != msi_config->total_vectors) { ath11k_err(ab, "failed to get %d MSI vectors, only %d available", - msi_config.total_vectors, num_vectors); + msi_config->total_vectors, num_vectors); if (num_vectors >= 0) return -EINVAL; @@ -932,6 +1026,9 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) return ret; } + if (ab->bus_params.static_window_map) + ath11k_pci_select_static_window(ab_pci); + return 0; } @@ -1076,6 +1173,7 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = { .map_service_to_pipe = ath11k_pci_map_service_to_pipe, .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, + .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx, }; static int ath11k_pci_probe(struct pci_dev *pdev, @@ -1130,6 +1228,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ret = -EOPNOTSUPP; goto err_pci_free_region; } + ab_pci->msi_config = &ath11k_msi_config[0]; + break; + case QCN9074_DEVICE_ID: + ab_pci->msi_config = &ath11k_msi_config[1]; + ab->bus_params.static_window_map = true; + ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; default: dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index fe44d0dfce19..f3e645891d19 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -34,16 +34,20 @@ #define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2 #define PCIE_INT_CLEAR_ALL 0xffffffff -#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG 0x01e0c0ac +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(x) \ + (ab->hw_params.regs->pcie_qserdes_sysclk_en_sel) #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG 0x01e0c628 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL 0x02 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG 0x01e0c62c -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL 0x52 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG 0x01e0c634 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL 0xff -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK 0x000000ff +#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(x) \ + (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base) +#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02 +#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(x) \ + (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4) +#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52 +#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(x) \ + (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc) +#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff +#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 @@ -73,6 +77,7 @@ struct ath11k_pci { char amss_path[100]; u32 msi_ep_base_data; struct mhi_controller *mhi_ctrl; + const struct ath11k_msi_config *msi_config; unsigned long mhi_state; u32 register_window; diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 7968fe4eda22..b5e34d670715 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1556,6 +1556,8 @@ static int 
ath11k_qmi_host_cap_send(struct ath11k_base *ab) req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; } + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n"); + ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); if (ret < 0) @@ -1566,7 +1568,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "Failed to send host capability request,err = %d\n", ret); + ath11k_warn(ab, "failed to send host capability request: %d\n", ret); goto out; } @@ -1575,7 +1577,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) goto out; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "Host capability request failed, result: %d, err: %d\n", + ath11k_warn(ab, "host capability request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -1624,24 +1626,26 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi indication register request\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_IND_REGISTER_REQ_V01, QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { - ath11k_warn(ab, "Failed to send indication register request, err = %d\n", + ath11k_warn(ab, "failed to send indication register request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "failed to register fw indication %d\n", ret); + ath11k_warn(ab, "failed to register fw indication: %d\n", ret); goto out; } if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n", + ath11k_warn(ab, "firmware indication register request failed: %d %d\n", resp->resp.result, resp->resp.error); ret = -EINVAL; goto out; @@ -1699,19 +1703,22 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi respond memory request delayed %i\n", + delayed); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_RESPOND_MEM_REQ_V01, QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to respond memory request, err = %d\n", + ath11k_warn(ab, "failed to respond qmi memory request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed memory request, err = %d\n", ret); + ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret); goto out; } @@ -1722,7 +1729,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) if (delayed && resp.resp.error == 0) goto out; - ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n", + ath11k_warn(ab, "qmi respond memory request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -1765,7 +1772,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) &chunk->paddr, GFP_KERNEL); if (!chunk->vaddr) { - if (ab->qmi.mem_seg_count <= 2) { + if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi dma allocation failed (%d B type %u), will try later with small size\n", chunk->size, @@ -1774,7 +1781,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) 
ab->qmi.target_mem_delayed = true; return 0; } - ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n", + + ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n", chunk->size, chunk->type); return -EINVAL; @@ -1843,24 +1851,26 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi target cap request\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_CAP_REQ_V01, QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send target cap request, err = %d\n", + ath11k_warn(ab, "failed to send qmi cap request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed target cap request %d\n", ret); + ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n", + ath11k_warn(ab, "qmi cap request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -1923,7 +1933,7 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type, ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) { - ath11k_warn(ab, "qmi failed to load BDF\n"); + ath11k_warn(ab, "failed to load board file: %d\n", ret); return ret; } @@ -1971,7 +1981,7 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE); if (!bdf_addr) { - ath11k_warn(ab, "qmi ioremap error for BDF\n"); + ath11k_warn(ab, "failed ioremap for board file\n"); ret = -EIO; goto out; } @@ -2000,6 +2010,9 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) if (ret < 0) goto out_qmi_bdf; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n", + type); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, @@ -2014,7 +2027,7 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) goto out_qmi_bdf; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n", + ath11k_warn(ab, "board file download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out_qmi_bdf; @@ -2047,7 +2060,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) memset(&bd, 0, sizeof(bd)); ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) { - ath11k_warn(ab, "qmi failed to load bdf:\n"); + ath11k_warn(ab, "failed to fetch board file: %d\n", ret); goto out; } @@ -2090,6 +2103,9 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) if (ret < 0) goto out_qmi_bdf; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n", + remaining); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, @@ -2104,7 +2120,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) goto out_qmi_bdf; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n", + ath11k_warn(ab, "bdf download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = resp.resp.result; goto out_qmi_bdf; @@ -2200,24 +2216,26 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi 
m3 info req\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_M3_INFO_REQ_V01, QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send M3 information request, err = %d\n", + ath11k_warn(ab, "failed to send m3 information request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed M3 information request %d\n", ret); + ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n", + ath11k_warn(ab, "m3 info request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -2246,12 +2264,14 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wlan mode req mode %d\n", mode); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_MODE_REQ_V01, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n", + ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n", mode, ret); goto out; } @@ -2262,13 +2282,13 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, ath11k_warn(ab, "WLFW service is dis-connected\n"); return 0; } - ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n", + ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n", mode, ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n", + ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n", mode, resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -2338,24 +2358,26 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wlan cfg req\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_CFG_REQ_V01, QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan config request, err = %d\n", + ath11k_warn(ab, "failed to send wlan config request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed wlan config request, err = %d\n", ret); + ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n", + ath11k_warn(ab, "wlan config request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -2370,9 +2392,11 @@ void ath11k_qmi_firmware_stop(struct ath11k_base *ab) { int ret; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware stop\n"); + ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan mode off\n"); + ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret); return; } } @@ -2382,15 +2406,17 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab, { int ret; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware start\n"); + ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret); + 
ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret); return ret; } ret = ath11k_qmi_wlanfw_mode_send(ab, mode); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret); + ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } @@ -2404,7 +2430,7 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret); + ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } @@ -2414,7 +2440,7 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) (ab->qmi.cal_done == 1), ATH11K_COLD_BOOT_FW_RESET_DELAY); if (timeout <= 0) { - ath11k_warn(ab, "Coldboot Calibration failed - wait ended\n"); + ath11k_warn(ab, "coldboot calibration timed out\n"); return 0; } @@ -2453,13 +2479,14 @@ static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) ret = ath11k_qmi_fw_ind_register_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret); + ath11k_warn(ab, "failed to send qmi firmware indication: %d\n", + ret); return ret; } ret = ath11k_qmi_host_cap_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret); + ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret); return ret; } @@ -2473,7 +2500,7 @@ static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) ret = ath11k_qmi_respond_fw_mem_request(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret); + ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret); return ret; } @@ -2487,7 +2514,8 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) ret = ath11k_qmi_request_target_cap(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret); + ath11k_warn(ab, "failed to request qmi target capabilities: %d\n", + ret); return ret; } @@ -2496,13 +2524,13 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) else ret = ath11k_qmi_load_bdf_qmi(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret); + ath11k_warn(ab, "failed to load board data file: %d\n", ret); return ret; } ret = ath11k_qmi_wlanfw_m3_info_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret); + ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret); return ret; } @@ -2523,7 +2551,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, if (msg->mem_seg_len == 0 || msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) - ath11k_warn(ab, "Invalid memory segment length: %u\n", + ath11k_warn(ab, "invalid memory segment length: %u\n", msg->mem_seg_len); ab->qmi.mem_seg_count = msg->mem_seg_len; @@ -2538,14 +2566,14 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, if (ab->bus_params.fixed_mem_region) { ret = ath11k_qmi_assign_target_mem_chunk(ab); if (ret) { - ath11k_warn(ab, "qmi failed to assign target memory: %d\n", + ath11k_warn(ab, "failed to assign qmi target memory: %d\n", ret); return; } } else { ret = ath11k_qmi_alloc_target_mem_chunk(ab); if (ret) { - ath11k_warn(ab, "qmi failed to alloc target memory: %d\n", + ath11k_warn(ab, "failed to allocate qmi target memory: %d\n", ret); return; } @@ -2639,7 +2667,7 @@ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, sizeof(*sq), 0); if (ret) { - 
ath11k_warn(ab, "qmi failed to connect to remote service %d\n", ret); + ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret); return ret; } @@ -2725,7 +2753,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE: break; default: - ath11k_warn(ab, "invalid event type: %d", event->type); + ath11k_warn(ab, "invalid qmi event type: %d", event->type); break; } kfree(event); @@ -2746,7 +2774,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab) ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX, &ath11k_qmi_ops, ath11k_qmi_msg_handlers); if (ret < 0) { - ath11k_warn(ab, "failed to initialize qmi handle\n"); + ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret); return ret; } @@ -2765,7 +2793,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab) ATH11K_QMI_WLFW_SERVICE_VERS_V01, ab->qmi.service_ins_id); if (ret < 0) { - ath11k_warn(ab, "failed to add qmi lookup\n"); + ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret); destroy_workqueue(ab->qmi.event_wq); return ret; } diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 7bad374cc23a..3d5930330703 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -21,11 +21,13 @@ #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02 #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01 #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02 +#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07 #define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32 #define ATH11K_QMI_RESP_LEN_MAX 8192 -#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32 +#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52 #define ATH11K_QMI_CALDB_SIZE 0x480000 #define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20 +#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 @@ -141,6 +143,7 @@ struct ath11k_qmi { #define QMI_IPQ8074_FW_MEM_MODE 0xFF #define HOST_DDR_REGION_TYPE 0x1 #define BDF_MEM_REGION_TYPE 0x2 +#define M3_DUMP_REGION_TYPE 0x3 #define CALDB_MEM_REGION_TYPE 0x4 struct qmi_wlanfw_host_cap_req_msg_v01 { @@ -216,8 +219,8 @@ struct qmi_wlanfw_ind_register_resp_msg_v01 { u64 fw_status; }; -#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124 -#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548 +#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824 +#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888 #define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7 #define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036 diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h index 86494da1069a..0cdb4a1f816e 100644 --- a/drivers/net/wireless/ath/ath11k/rx_desc.h +++ b/drivers/net/wireless/ath/ath11k/rx_desc.h @@ -414,7 +414,7 @@ struct rx_attention { #define RX_MPDU_START_RAW_MPDU BIT(0) -struct rx_mpdu_start { +struct rx_mpdu_start_ipq8074 { __le16 info0; __le16 phy_ppdu_id; __le16 ast_index; @@ -440,6 +440,112 @@ struct rx_mpdu_start { __le32 raw; } __packed; +#define RX_MPDU_START_INFO7_REO_DEST_IND GENMASK(4, 0) +#define RX_MPDU_START_INFO7_LMAC_PEER_ID_MSB GENMASK(6, 5) +#define RX_MPDU_START_INFO7_FLOW_ID_TOEPLITZ BIT(7) +#define RX_MPDU_START_INFO7_PKT_SEL_FP_UCAST_DATA BIT(8) +#define RX_MPDU_START_INFO7_PKT_SEL_FP_MCAST_DATA BIT(9) +#define RX_MPDU_START_INFO7_PKT_SEL_FP_CTRL_BAR BIT(10) +#define RX_MPDU_START_INFO7_RXDMA0_SRC_RING_SEL GENMASK(12, 11) 
+#define RX_MPDU_START_INFO7_RXDMA0_DST_RING_SEL GENMASK(14, 13) + +#define RX_MPDU_START_INFO8_REO_QUEUE_DESC_HI GENMASK(7, 0) +#define RX_MPDU_START_INFO8_RECV_QUEUE_NUM GENMASK(23, 8) +#define RX_MPDU_START_INFO8_PRE_DELIM_ERR_WARN BIT(24) +#define RX_MPDU_START_INFO8_FIRST_DELIM_ERR BIT(25) + +#define RX_MPDU_START_INFO9_EPD_EN BIT(0) +#define RX_MPDU_START_INFO9_ALL_FRAME_ENCPD BIT(1) +#define RX_MPDU_START_INFO9_ENC_TYPE GENMASK(5, 2) +#define RX_MPDU_START_INFO9_VAR_WEP_KEY_WIDTH GENMASK(7, 6) +#define RX_MPDU_START_INFO9_MESH_STA GENMASK(9, 8) +#define RX_MPDU_START_INFO9_BSSID_HIT BIT(10) +#define RX_MPDU_START_INFO9_BSSID_NUM GENMASK(14, 11) +#define RX_MPDU_START_INFO9_TID GENMASK(18, 15) + +#define RX_MPDU_START_INFO10_RXPCU_MPDU_FLTR GENMASK(1, 0) +#define RX_MPDU_START_INFO10_SW_FRAME_GRP_ID GENMASK(8, 2) +#define RX_MPDU_START_INFO10_NDP_FRAME BIT(9) +#define RX_MPDU_START_INFO10_PHY_ERR BIT(10) +#define RX_MPDU_START_INFO10_PHY_ERR_MPDU_HDR BIT(11) +#define RX_MPDU_START_INFO10_PROTO_VER_ERR BIT(12) +#define RX_MPDU_START_INFO10_AST_LOOKUP_VALID BIT(13) + +#define RX_MPDU_START_INFO11_MPDU_FCTRL_VALID BIT(0) +#define RX_MPDU_START_INFO11_MPDU_DUR_VALID BIT(1) +#define RX_MPDU_START_INFO11_MAC_ADDR1_VALID BIT(2) +#define RX_MPDU_START_INFO11_MAC_ADDR2_VALID BIT(3) +#define RX_MPDU_START_INFO11_MAC_ADDR3_VALID BIT(4) +#define RX_MPDU_START_INFO11_MAC_ADDR4_VALID BIT(5) +#define RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID BIT(6) +#define RX_MPDU_START_INFO11_MPDU_QOS_CTRL_VALID BIT(7) +#define RX_MPDU_START_INFO11_MPDU_HT_CTRL_VALID BIT(8) +#define RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID BIT(9) +#define RX_MPDU_START_INFO11_MPDU_FRAG_NUMBER GENMASK(13, 10) +#define RX_MPDU_START_INFO11_MORE_FRAG_FLAG BIT(14) +#define RX_MPDU_START_INFO11_FROM_DS BIT(16) +#define RX_MPDU_START_INFO11_TO_DS BIT(17) +#define RX_MPDU_START_INFO11_ENCRYPTED BIT(18) +#define RX_MPDU_START_INFO11_MPDU_RETRY BIT(19) +#define RX_MPDU_START_INFO11_MPDU_SEQ_NUM GENMASK(31, 20) + +#define RX_MPDU_START_INFO12_KEY_ID GENMASK(7, 0) +#define RX_MPDU_START_INFO12_NEW_PEER_ENTRY BIT(8) +#define RX_MPDU_START_INFO12_DECRYPT_NEEDED BIT(9) +#define RX_MPDU_START_INFO12_DECAP_TYPE GENMASK(11, 10) +#define RX_MPDU_START_INFO12_VLAN_TAG_C_PADDING BIT(12) +#define RX_MPDU_START_INFO12_VLAN_TAG_S_PADDING BIT(13) +#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_C BIT(14) +#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_S BIT(15) +#define RX_MPDU_START_INFO12_PRE_DELIM_COUNT GENMASK(27, 16) +#define RX_MPDU_START_INFO12_AMPDU_FLAG BIT(28) +#define RX_MPDU_START_INFO12_BAR_FRAME BIT(29) +#define RX_MPDU_START_INFO12_RAW_MPDU BIT(30) + +#define RX_MPDU_START_INFO13_MPDU_LEN GENMASK(13, 0) +#define RX_MPDU_START_INFO13_FIRST_MPDU BIT(14) +#define RX_MPDU_START_INFO13_MCAST_BCAST BIT(15) +#define RX_MPDU_START_INFO13_AST_IDX_NOT_FOUND BIT(16) +#define RX_MPDU_START_INFO13_AST_IDX_TIMEOUT BIT(17) +#define RX_MPDU_START_INFO13_POWER_MGMT BIT(18) +#define RX_MPDU_START_INFO13_NON_QOS BIT(19) +#define RX_MPDU_START_INFO13_NULL_DATA BIT(20) +#define RX_MPDU_START_INFO13_MGMT_TYPE BIT(21) +#define RX_MPDU_START_INFO13_CTRL_TYPE BIT(22) +#define RX_MPDU_START_INFO13_MORE_DATA BIT(23) +#define RX_MPDU_START_INFO13_EOSP BIT(24) +#define RX_MPDU_START_INFO13_FRAGMENT BIT(25) +#define RX_MPDU_START_INFO13_ORDER BIT(26) +#define RX_MPDU_START_INFO13_UAPSD_TRIGGER BIT(27) +#define RX_MPDU_START_INFO13_ENCRYPT_REQUIRED BIT(28) +#define RX_MPDU_START_INFO13_DIRECTED BIT(29) +#define RX_MPDU_START_INFO13_AMSDU_PRESENT BIT(30) + +struct 
rx_mpdu_start_qcn9074 { + __le32 info7; + __le32 reo_queue_desc_lo; + __le32 info8; + __le32 pn[4]; + __le32 info9; + __le32 peer_meta_data; + __le16 info10; + __le16 phy_ppdu_id; + __le16 ast_index; + __le16 sw_peer_id; + __le32 info11; + __le32 info12; + __le32 info13; + __le16 frame_ctrl; + __le16 duration; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; + __le16 qos_ctrl; + __le32 ht_ctrl; +} __packed; + /* rx_mpdu_start * * rxpcu_mpdu_filter_in_category @@ -672,7 +778,19 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21) #define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24) -struct rx_msdu_start { +struct rx_msdu_start_ipq8074 { + __le16 info0; + __le16 phy_ppdu_id; + __le32 info1; + __le32 info2; + __le32 toeplitz_hash; + __le32 flow_id_toeplitz; + __le32 info3; + __le32 ppdu_start_timestamp; + __le32 phy_meta_data; +} __packed; + +struct rx_msdu_start_qcn9074 { __le16 info0; __le16 phy_ppdu_id; __le32 info1; @@ -682,6 +800,8 @@ struct rx_msdu_start { __le32 info3; __le32 ppdu_start_timestamp; __le32 phy_meta_data; + __le16 vlan_ctag_c1; + __le16 vlan_stag_c1; } __packed; /* rx_msdu_start @@ -894,7 +1014,7 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1) #define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6) -struct rx_msdu_end { +struct rx_msdu_end_ipq8074 { __le16 info0; __le16 phy_ppdu_id; __le16 ip_hdr_cksum; @@ -917,6 +1037,58 @@ struct rx_msdu_end { __le16 sa_sw_peer_id; } __packed; +#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0) + +#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0) +#define RX_MSDU_END_INFO2_SA_OFFSET GENMASK(11, 6) +#define RX_MSDU_END_INFO2_DA_OFFSET_VALID BIT(12) +#define RX_MSDU_END_INFO2_SA_OFFSET_VALID BIT(13) +#define RX_MSDU_END_INFO2_L3_TYPE GENMASK(31, 16) + +#define RX_MSDU_END_INFO4_SA_IDX_TIMEOUT BIT(0) +#define RX_MSDU_END_INFO4_DA_IDX_TIMEOUT BIT(1) +#define RX_MSDU_END_INFO4_MSDU_LIMIT_ERR BIT(2) +#define RX_MSDU_END_INFO4_FLOW_IDX_TIMEOUT BIT(3) +#define RX_MSDU_END_INFO4_FLOW_IDX_INVALID BIT(4) +#define RX_MSDU_END_INFO4_WIFI_PARSER_ERR BIT(5) +#define RX_MSDU_END_INFO4_AMSDU_PARSER_ERR BIT(6) +#define RX_MSDU_END_INFO4_SA_IS_VALID BIT(7) +#define RX_MSDU_END_INFO4_DA_IS_VALID BIT(8) +#define RX_MSDU_END_INFO4_DA_IS_MCBC BIT(9) +#define RX_MSDU_END_INFO4_L3_HDR_PADDING GENMASK(11, 10) +#define RX_MSDU_END_INFO4_FIRST_MSDU BIT(12) +#define RX_MSDU_END_INFO4_LAST_MSDU BIT(13) + +#define RX_MSDU_END_INFO6_AGGR_COUNT GENMASK(7, 0) +#define RX_MSDU_END_INFO6_FLOW_AGGR_CONTN BIT(8) +#define RX_MSDU_END_INFO6_FISA_TIMEOUT BIT(9) + +struct rx_msdu_end_qcn9074 { + __le16 info0; + __le16 phy_ppdu_id; + __le16 ip_hdr_cksum; + __le16 mpdu_length_info; + __le32 info1; + __le32 rule_indication[2]; + __le32 info2; + __le32 ipv6_options_crc; + __le32 tcp_seq_num; + __le32 tcp_ack_num; + __le16 info3; + __le16 window_size; + __le16 tcp_udp_cksum; + __le16 info4; + __le16 sa_idx; + __le16 da_idx; + __le32 info5; + __le32 fse_metadata; + __le16 cce_metadata; + __le16 sa_sw_peer_id; + __le32 info6; + __le16 cum_l4_cksum; + __le16 cum_ip_length; +} __packed; + /* rx_msdu_end * * rxpcu_mpdu_filter_in_category @@ -1190,16 +1362,16 @@ struct rx_mpdu_end { #define HAL_RX_DESC_HDR_STATUS_LEN 120 -struct hal_rx_desc { +struct hal_rx_desc_ipq8074 { __le32 msdu_end_tag; - struct rx_msdu_end msdu_end; + struct rx_msdu_end_ipq8074 msdu_end; __le32 rx_attn_tag; struct rx_attention attention; __le32 msdu_start_tag; - struct rx_msdu_start 
msdu_start; + struct rx_msdu_start_ipq8074 msdu_start; u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES]; __le32 mpdu_start_tag; - struct rx_mpdu_start mpdu_start; + struct rx_mpdu_start_ipq8074 mpdu_start; __le32 mpdu_end_tag; struct rx_mpdu_end mpdu_end; u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES]; @@ -1209,6 +1381,32 @@ struct hal_rx_desc { u8 msdu_payload[0]; } __packed; +struct hal_rx_desc_qcn9074 { + __le32 msdu_end_tag; + struct rx_msdu_end_qcn9074 msdu_end; + __le32 rx_attn_tag; + struct rx_attention attention; + __le32 msdu_start_tag; + struct rx_msdu_start_qcn9074 msdu_start; + u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES]; + __le32 mpdu_start_tag; + struct rx_mpdu_start_qcn9074 mpdu_start; + __le32 mpdu_end_tag; + struct rx_mpdu_end mpdu_end; + u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES]; + __le32 hdr_status_tag; + __le32 phy_ppdu_id; + u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN]; + u8 msdu_payload[0]; +} __packed; + +struct hal_rx_desc { + union { + struct hal_rx_desc_ipq8074 ipq8074; + struct hal_rx_desc_qcn9074 qcn9074; + } u; +} __packed; + #define HAL_RX_RU_ALLOC_TYPE_MAX 6 #define RU_26 1 #define RU_52 2 diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index cccfd3bd4d27..5ca2d80679b6 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, return 0; } -static int -ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf, - u32 len, const struct wmi_pdev_temperature_event *ev) -{ - const void **tb; - int ret; - - tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); - return ret; - } - - ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; - if (!ev) { - ath11k_warn(ab, "failed to fetch pdev temp ev"); - kfree(tb); - return -EPROTO; - } - - kfree(tb); - return 0; -} - size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head) { struct ath11k_fw_stats_vdev *i; @@ -6196,10 +6171,8 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) } } - /* TODO: Pending handle beacon implementation - *if (ieee80211_is_beacon(hdr->frame_control)) - * ath11k_mac_handle_beacon(ar, skb); - */ + if (ieee80211_is_beacon(hdr->frame_control)) + ath11k_mac_handle_beacon(ar, skb); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", @@ -6418,10 +6391,7 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) switch (roam_ev.reason) { case WMI_ROAM_REASON_BEACON_MISS: - /* TODO: Pending beacon miss and connection_loss_work - * implementation - * ath11k_mac_handle_beacon_miss(ar, vdev_id); - */ + ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id); break; case WMI_ROAM_REASON_BETTER_AP: case WMI_ROAM_REASON_LOW_RSSI: @@ -6849,23 +6819,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; - struct wmi_pdev_temperature_event ev = {0}; + const void **tb; + const struct wmi_pdev_temperature_event *ev; + int ret; - if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) { - ath11k_warn(ab, "failed to extract pdev temperature event"); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch pdev 
temp ev"); + kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); + "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id); - ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id); + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { - ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); + ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id); + kfree(tb); return; } - ath11k_thermal_event_temperature(ar, ev.temp); + ath11k_thermal_event_temperature(ar, ev->temp); + + kfree(tb); } static void ath11k_fils_discovery_event(struct ath11k_base *ab, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index db0c6fa9c9dc..ff61ae34ecdf 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) if (unlikely(r)) { ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", reg_offset, r); - return -EIO; + return -1; } return be32_to_cpu(val); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 5abc2a5526ec..2ca3b86714a9 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) srev = REG_READ(ah, AR_SREV); - if (srev == -EIO) { + if (srev == -1) { ath_err(ath9k_hw_common(ah), "Failed to read SREV register"); return false; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 01f9c26f9bf3..e9a36dd7144f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -617,7 +617,6 @@ static int ath9k_of_init(struct ath_softc *sc) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); enum ath_bus_type bus_type = common->bus_ops->ath_bus_type; - const char *mac; char eeprom_name[100]; int ret; @@ -640,9 +639,7 @@ static int ath9k_of_init(struct ath_softc *sc) ah->ah_flags |= AH_NO_EEP_SWAP; } - mac = of_get_mac_address(np); - if (!IS_ERR(mac)) - ether_addr_copy(common->macaddr, mac); + of_get_mac_address(np, common->macaddr); return 0; } diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 0d38100d6e4f..84a8ce0784b1 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -631,14 +631,9 @@ static inline u16 carl9170_get_seq(struct sk_buff *skb) return get_seq_h(carl9170_get_hdr(skb)); } -static inline u16 get_tid_h(struct ieee80211_hdr *hdr) -{ - return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK; -} - static inline u16 carl9170_get_tid(struct sk_buff *skb) { - return get_tid_h(carl9170_get_hdr(skb)); + return ieee80211_get_tid(carl9170_get_hdr(skb)); } static inline struct ieee80211_vif * diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 6b8446ff48c8..88444fe6d1c6 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -394,7 +394,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar, if (unlikely(!sta)) goto out_rcu; - tid = get_tid_h(hdr); + tid = ieee80211_get_tid(hdr); sta_info = (void *) sta->drv_priv; tid_info = rcu_dereference(sta_info->agg[tid]); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c 
b/drivers/net/wireless/ath/wil6210/wmi.c index 823ec6e78a22..02ad44997e87 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1456,7 +1456,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif, u8 cid = basic->cid; struct wil_sta_info *sta; - if (cid < 0 || cid >= wil->max_assoc_sta) { + if (cid >= wil->max_assoc_sta) { wil_err(wil, "invalid cid %d\n", cid); return; } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 150a366e8f62..17bcec5f3ff7 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -4053,7 +4053,7 @@ static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates) { struct ieee80211_supported_band *sband = dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)]; - struct ieee80211_rate *rate; + const struct ieee80211_rate *rate; int i; u16 basic, direct, offset, basic_offset, rateptr; diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 7692a2618c97..f64ebff68308 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -2762,7 +2762,7 @@ static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates { struct ieee80211_supported_band *sband = dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ]; - struct ieee80211_rate *rate; + const struct ieee80211_rate *rate; int i; u16 basic, direct, offset, basic_offset, rateptr; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index ea78fe527c5d..838b09b23abf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -151,7 +151,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work) /* Send down the multicast list first. 
*/ cnt = netdev_mc_count(ndev); buflen = sizeof(cnt) + (cnt * ETH_ALEN); - buf = kmalloc(buflen, GFP_ATOMIC); + buf = kmalloc(buflen, GFP_KERNEL); if (!buf) return; bufp = buf; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 4146faeed344..44ba6f389fa9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -112,7 +112,6 @@ do { \ extern int brcmf_msg_level; -struct brcmf_bus; struct brcmf_pub; #ifdef DEBUG struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h index ee273e3bb101..e000ef78928c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h @@ -28,7 +28,7 @@ struct brcmf_usbdev { int ntxq, nrxq, rxsize; u32 bus_mtu; int devid; - int chiprev; /* chip revsion number */ + int chiprev; /* chip revision number */ }; /* IO Request Block (IRB) */ diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 60db38c38960..fd37d4d2983b 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type) local->last_auth = auth_type; } +static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock) +{ + int i, status; + /* large variables, so don't inline this function, + * maybe change to kmalloc + */ + tdsRssiRid rssi_rid; + CapabilityRid cap_rid; + + kfree(ai->SSID); + ai->SSID = NULL; + // general configuration (read/modify/write) + status = readConfigRid(ai, lock); + if (status != SUCCESS) return ERROR; + + status = readCapabilityRid(ai, &cap_rid, lock); + if (status != SUCCESS) return ERROR; + + status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock); + if (status == SUCCESS) { + if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL) + memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ + } + else { + kfree(ai->rssi); + ai->rssi = NULL; + if (cap_rid.softCap & cpu_to_le16(8)) + ai->config.rmode |= RXMODE_NORMALIZED_RSSI; + else + airo_print_warn(ai->dev->name, "unknown received signal " + "level scale"); + } + ai->config.opmode = adhoc ? 
MODE_STA_IBSS : MODE_STA_ESS; + set_auth_type(ai, AUTH_OPEN); + ai->config.modulation = MOD_CCK; + + if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && + (cap_rid.extSoftCap & cpu_to_le16(1)) && + micsetup(ai) == SUCCESS) { + ai->config.opmode |= MODE_MIC; + set_bit(FLAG_MIC_CAPABLE, &ai->flags); + } + + /* Save off the MAC */ + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = ai->config.macAddr[i]; + } + + /* Check to see if there are any insmod configured + rates to add */ + if (rates[0]) { + memset(ai->config.rates, 0, sizeof(ai->config.rates)); + for (i = 0; i < 8 && rates[i]; i++) { + ai->config.rates[i] = rates[i]; + } + } + set_bit (FLAG_COMMIT, &ai->flags); + + return SUCCESS; +} + + static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) { Cmd cmd; @@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) if (lock) up(&ai->sem); if (ai->config.len == 0) { - int i; - tdsRssiRid rssi_rid; - CapabilityRid cap_rid; - - kfree(ai->SSID); - ai->SSID = NULL; - // general configuration (read/modify/write) - status = readConfigRid(ai, lock); - if (status != SUCCESS) return ERROR; - - status = readCapabilityRid(ai, &cap_rid, lock); - if (status != SUCCESS) return ERROR; - - status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock); - if (status == SUCCESS) { - if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL) - memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ - } - else { - kfree(ai->rssi); - ai->rssi = NULL; - if (cap_rid.softCap & cpu_to_le16(8)) - ai->config.rmode |= RXMODE_NORMALIZED_RSSI; - else - airo_print_warn(ai->dev->name, "unknown received signal " - "level scale"); - } - ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS; - set_auth_type(ai, AUTH_OPEN); - ai->config.modulation = MOD_CCK; - - if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && - (cap_rid.extSoftCap & cpu_to_le16(1)) && - micsetup(ai) == SUCCESS) { - ai->config.opmode |= MODE_MIC; - set_bit(FLAG_MIC_CAPABLE, &ai->flags); - } - - /* Save off the MAC */ - for (i = 0; i < ETH_ALEN; i++) { - mac[i] = ai->config.macAddr[i]; - } - - /* Check to see if there are any insmod configured - rates to add */ - if (rates[0]) { - memset(ai->config.rates, 0, sizeof(ai->config.rates)); - for (i = 0; i < 8 && rates[i]; i++) { - ai->config.rates[i] = rates[i]; - } - } - set_bit (FLAG_COMMIT, &ai->flags); + status = airo_readconfig(ai, mac, lock); + if (status != SUCCESS) + return ERROR; } /* Setup the SSIDs if present */ diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index a0cf78c418ac..903de34028ef 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, } if (ext->alg != IW_ENCODE_ALG_NONE) { - memcpy(sec.keys[idx], ext->key, ext->key_len); - sec.key_sizes[idx] = ext->key_len; + int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN); + + memcpy(sec.keys[idx], ext->key, key_len); + sec.key_sizes[idx] = key_len; sec.flags |= (1 << idx); if (ext->alg == IW_ENCODE_ALG_WEP) { sec.encode_alg[idx] = SEC_ALG_WEP; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 4ca8212d4fa4..6ff2674f8466 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -751,9 +751,7 @@ il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) static void 
il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) { -#ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); -#endif D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); } diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 0651a6a416d1..219fed91cac5 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -1430,10 +1430,8 @@ static void il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) { -#ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; -#endif D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", scan_notif->scanned_channels, scan_notif->tsf_low, diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index ea1b1bb7ddcb..40877ef1fbf2 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -2937,7 +2937,7 @@ do { \ } while (0) #else -#define IL_DBG(level, fmt, args...) +#define IL_DBG(level, fmt, args...) no_printk(fmt, ##args) static inline void il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len) { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 0a0e25a3c681..c2315dea9a23 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -9,7 +9,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 62 +#define IWL_22000_UCODE_API_MAX 63 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -48,6 +48,10 @@ #define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-" #define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-" #define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-" +#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-" +#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-" +#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-" +#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" @@ -91,6 +95,14 @@ IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -357,13 +369,27 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { .umac_prph_offset = 0x300000 }; +const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .mq_rx_supported = true, + .use_tfh = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = 
"Intel(R) Wi-Fi 6 AX201 160MHz"; const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; -const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz"; -const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz"; -const char iwl_ma_name[] = "Intel(R) Wi-Fi 6"; +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; +const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; +const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; @@ -373,6 +399,10 @@ const char iwl_ax201_killer_1650s_name[] = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)"; const char iwl_ax201_killer_1650i_name[] = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; +const char iwl_ax210_killer_1675w_name[] = + "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; +const char iwl_ax210_killer_1675x_name[] = + "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, @@ -578,7 +608,7 @@ const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = { +const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, IWL_DEVICE_AX210, @@ -719,6 +749,34 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; +const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { + .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { + .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { + .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { + .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); @@ -740,3 +798,7 @@ MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index c4164bf508e5..df1297358379 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -168,7 +168,7 @@ const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz"; const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz"; const char iwl9260_killer_1550_name[] = - "Killer (R) Wireless-AC 
1550 Wireless Network Adapter (9260NGW)"; + "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz"; const char iwl9560_killer_1550i_name[] = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)"; const char iwl9560_killer_1550s_name[] = diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 82a4f7e8ba54..e31bba836c6f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2020 Intel Corporation + * Copyright (C) 2019-2021 Intel Corporation */ #include <linux/uuid.h> #include "iwl-drv.h" @@ -181,14 +181,13 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, /* * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid - * (i.e. it is an integer smaller than 2, as we currently support only - * 2 revisions). + * (i.e. it is an integer (each caller has to check by itself + * if the returned revision is supported)). */ if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 || - data->package.elements[0].type != ACPI_TYPE_INTEGER || - data->package.elements[0].integer.value > 1) { - IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n"); + data->package.elements[0].type != ACPI_TYPE_INTEGER) { + IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n"); return ERR_PTR(-EINVAL); } @@ -696,3 +695,70 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, return 0; } IWL_EXPORT_SYMBOL(iwl_sar_geo_init); + +static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func) +{ + union acpi_object *obj; + u32 ret; + + obj = iwl_acpi_get_dsm_object(dev, 0, + eval_func, NULL, + &iwl_guid); + + if (IS_ERR(obj)) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM func '%d': Got Error in obj = %ld\n", + eval_func, + PTR_ERR(obj)); + return 0; + } + + if (obj->type != ACPI_TYPE_INTEGER) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM func '%d' did not return a valid object, type=%d\n", + eval_func, + obj->type); + ret = 0; + goto out; + } + + ret = obj->integer.value; + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM method evaluated: func='%d', ret=%d\n", + eval_func, + ret); +out: + ACPI_FREE(obj); + return ret; +} + +__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +{ + u32 ret; + __le32 config_bitmap = 0; + + /* + ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2' + */ + ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2); + + if (ret == DSM_VALUE_INDONESIA_ENABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + + /* + ** Evaluate func 'DSM_FUNC_DISABLE_SRD' + */ + ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD); + + if (ret == DSM_VALUE_SRD_PASSIVE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); + + else if (ret == DSM_VALUE_SRD_DISABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); + + return config_bitmap; +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 030c50082568..d16e6ec08c9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -53,8 +53,8 @@ #define ACPI_WGDS_TABLE_SIZE 3 -#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \ - 
IWL_NUM_SUB_BANDS) + 2) +#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ + IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V2) + 2) @@ -77,6 +77,7 @@ enum iwl_dsm_funcs_rev_0 { DSM_FUNC_QUERY = 0, DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, + DSM_FUNC_11AX_ENABLEMENT = 6, }; enum iwl_dsm_values_srd { @@ -160,6 +161,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array, int *block_list_size); +__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); + #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) @@ -235,5 +238,11 @@ static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, { return -ENOENT; } + +static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +{ + return 0; +} + #endif /* CONFIG_ACPI */ #endif /* __iwl_fw_acpi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index ceeef8749765..0e38eb1cd75d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -147,6 +147,10 @@ enum iwl_tof_mcsi_enable { * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure * is valid * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid + * @IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT: enable/disable NDP ranging support + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid + * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid */ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), @@ -162,6 +166,9 @@ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10), IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11), IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12), + IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22), + IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23), + IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24), }; /** @@ -176,6 +183,9 @@ enum iwl_tof_responder_cmd_valid_field { * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask + * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging + * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the + * initiator supports it */ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), @@ -188,6 +198,8 @@ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, + IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24), + IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25), }; /** @@ -226,7 +238,7 @@ struct iwl_tof_responder_config_cmd_v6 { } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ /** - * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * struct iwl_tof_responder_config_cmd_v7 - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. 
@@ -245,7 +257,7 @@ struct iwl_tof_responder_config_cmd_v6 { * @bssid: Current AP BSSID * @reserved2: reserved */ -struct iwl_tof_responder_config_cmd { +struct iwl_tof_responder_config_cmd_v7 { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 format_bw; @@ -259,7 +271,56 @@ struct iwl_tof_responder_config_cmd { __le16 specific_calib; u8 bssid[ETH_ALEN]; __le16 reserved2; -} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */ + +#define IWL_RESPONDER_STS_POS 3 +#define IWL_RESPONDER_TOTAL_LTF_POS 6 + +/** + * struct iwl_tof_responder_config_cmd_v8 - ToF AP mode (for debug) + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @rate: current AP rate + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @reserved1: reserved + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @r2i_ndp_params: parameters for R2I NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max number of total LTFs + * (&enum ieee80211_range_params_max_total_ltf) + * @i2r_ndp_params: parameters for I2R NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max number of total LTFs + * (&enum ieee80211_range_params_max_total_ltf) + */ +struct iwl_tof_responder_config_cmd_v8 { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 rate; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 reserved1; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 @@ -422,10 +483,12 @@ struct iwl_tof_range_req_ap_entry_v2 { * driver. * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow - * @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement + * @IWL_INITIATOR_AP_FLAGS_SECURED: request secure LTF measurement * @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback * @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request * instead of fw internal values. + * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR + * frames with protected management frames. 
*/ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -440,6 +503,7 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11), IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12), IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), + IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), }; /** @@ -657,6 +721,79 @@ struct iwl_tof_range_req_ap_entry_v7 { u8 tx_pn[IEEE80211_CCMP_PN_LEN]; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */ +#define IWL_LOCATION_MAX_STS_POS 3 + +/** + * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: reserved + * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: reserved + * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. 
+ */ +struct iwl_tof_range_req_ap_entry_v8 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + __le16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + u8 r2i_max_total_ltf; + u8 i2r_max_total_ltf; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */ + /** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as @@ -878,6 +1015,34 @@ struct iwl_tof_range_req_cmd_v11 { struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */ +/** + * struct iwl_tof_range_req_cmd_v12 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. + */ +struct iwl_tof_range_req_cmd_v12 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index fbca9dd872e7..dc8f2777e944 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -414,6 +414,9 @@ enum iwl_lari_config_masks { LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3), }; +#define IWL_11AX_UKRAINE_MASK 3 +#define IWL_11AX_UKRAINE_SHIFT 8 + /** * struct iwl_lari_config_change_cmd_v1 - change LARI configuration * @config_bitmap: bit map of the config commands. each bit will trigger a @@ -435,6 +438,21 @@ struct iwl_lari_config_change_cmd_v2 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */ /** + * struct iwl_lari_config_change_cmd_v3 - change LARI configuration + * @config_bitmap: bit map of the config commands. 
each bit will trigger a + * different predefined FW config operation + * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets + * @oem_11ax_allow_bitmap: bitmap of 11ax allowed MCCs. + * For each supported country, a pair of regulatory override bit and 11ax mode exist + * in the bit field. + */ +struct iwl_lari_config_change_cmd_v3 { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; +} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */ + +/** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 798417182d54..86445385f072 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -274,7 +274,7 @@ enum iwl_dev_tx_power_cmd_mode { #define IWL_NUM_CHAIN_TABLES 1 #define IWL_NUM_CHAIN_TABLES_V2 2 #define IWL_NUM_CHAIN_LIMITS 2 -#define IWL_NUM_SUB_BANDS 5 +#define IWL_NUM_SUB_BANDS_V1 5 #define IWL_NUM_SUB_BANDS_V2 11 /** @@ -300,7 +300,7 @@ struct iwl_dev_tx_power_common { * @per_chain: per chain restrictions */ struct iwl_dev_tx_power_cmd_v3 { - __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ #define IWL_DEV_MAX_TX_POWER 0x7FFF @@ -313,7 +313,7 @@ struct iwl_dev_tx_power_cmd_v3 { * @reserved: reserved (padding) */ struct iwl_dev_tx_power_cmd_v4 { - __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ @@ -332,7 +332,7 @@ struct iwl_dev_tx_power_cmd_v4 { * BIOS values. 
relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd_v5 { - __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; @@ -454,21 +454,23 @@ struct iwl_geo_tx_power_profiles_resp { /** * union iwl_ppag_table_cmd - union for all versions of PPAG command - * @v1: version 1, table revision = 0 - * @v2: version 2, table revision = 1 + * @v1: version 1 + * @v2: version 2 * - * @enabled: 1 if PPAG is enabled, 0 otherwise + * @flags: bit 0 - indicates enablement of PPAG for ETSI + * bit 1 - indicates enablement of PPAG for CHINA BIOS + * bit 1 can be used only in v3 (identical to v2) * @gain: table of antenna gain values per chain and sub-band * @reserved: reserved */ union iwl_ppag_table_cmd { struct { - __le32 enabled; - s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le32 flags; + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; s8 reserved[2]; } v1; struct { - __le32 enabled; + __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; s8 reserved[2]; } v2; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 2c74db823778..3f13b572915a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -780,36 +780,6 @@ struct iwl_rxq_sync_notification { } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** - * enum iwl_mvm_rxq_notif_type - Internal message identifier - * - * @IWL_MVM_RXQ_EMPTY: empty sync notification - * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA - * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN - */ -enum iwl_mvm_rxq_notif_type { - IWL_MVM_RXQ_EMPTY, - IWL_MVM_RXQ_NOTIF_DEL_BA, - IWL_MVM_RXQ_NSSN_SYNC, -}; - -/** - * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent - * in &iwl_rxq_sync_cmd. Should be DWORD aligned. - * FW is agnostic to the payload, so there are no endianity requirements. - * - * @type: value from &iwl_mvm_rxq_notif_type - * @sync: ctrl path is waiting for all notifications to be received - * @cookie: internal cookie to identify old notifications - * @data: payload - */ -struct iwl_mvm_internal_rxq_notif { - u16 type; - u16 sync; - u32 cookie; - u8 data[]; -} __packed; - -/** * enum iwl_mvm_pm_event - type of station PM event * @IWL_MVM_PM_EVENT_AWAKE: station woke up * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 6b8ca35cec1a..b2605aefc290 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -634,6 +634,12 @@ enum iwl_umac_scan_general_flags2 { * @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and * 5.2Ghz bands scan, trigger scan on 6GHz band to discover * the reported collocated APs + * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN: at the end of 2.4GHz and 5GHz + * bands scan, if not APs were discovered, allow scan to conitnue and scan + * 6GHz PSC channels in order to discover country information. + * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case + * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is + * activated over 6GHz PSC channels, filter in beacons and probe responses. 
*/ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0), @@ -649,6 +655,8 @@ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11), IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12), + IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13), + IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 504729663c35..cc4e18ca9566 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2559,7 +2559,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, fwrt->dump.wks[idx].dump_data = *dump_data; - IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id); + IWL_WARN(fwrt, + "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", + tp_id, (u32)(delay / USEC_PER_MSEC)); schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 35dffcaf5aba..f9c5cf538ad1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -362,6 +362,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in * reset flow + * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC + * channels even when these are not enabled. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -408,6 +410,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54, IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, + IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 1dee4714e505..153a3529e77a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -116,6 +116,9 @@ struct fw_img { #define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 #define PAGING_TLV_SECURE_MASK 1 +/* FW MSB Mask for regions/cache_control */ +#define FW_ADDR_CACHE_CONTROL 0xC0000000UL + /** * struct iwl_fw_paging * @fw_paging_phys: page phy pointer diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 986913f2fbd5..2ecec00db9da 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -10,6 +10,8 @@ #include "fw/api/soc.h" #include "fw/api/commands.h" +#include "fw/api/rx.h" +#include "fw/api/datapath.h" void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, @@ -95,3 +97,60 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt) return ret; } 
IWL_EXPORT_SYMBOL(iwl_set_soc_latency); + +int iwl_configure_rxq(struct iwl_fw_runtime *fwrt) +{ + int i, num_queues, size, ret; + struct iwl_rfh_queue_config *cmd; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + /* + * The default queue is configured via context info, so if we + * have a single queue, there's nothing to do here. + */ + if (fwrt->trans->num_rx_queues == 1) + return 0; + + if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) + return 0; + + /* skip the default queue */ + num_queues = fwrt->trans->num_rx_queues - 1; + + size = struct_size(cmd, data, num_queues); + + cmd = kzalloc(size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->num_queues = num_queues; + + for (i = 0; i < num_queues; i++) { + struct iwl_trans_rxq_dma_data data; + + cmd->data[i].q_num = i + 1; + iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data); + + cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); + cmd->data[i].urbd_stts_wrptr = + cpu_to_le64(data.urbd_stts_wrptr); + cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); + cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); + } + + hcmd.data[0] = cmd; + hcmd.len[0] = size; + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + + kfree(cmd); + + if (ret) + IWL_ERR(fwrt, "Failed to configure RX queues: %d\n", ret); + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_configure_rxq); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 0dba5444f2db..35af85a5430b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -190,5 +190,6 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt); +int iwl_configure_rxq(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c4f5da76f1c0..b35ffdfdf14b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -416,6 +416,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_SNJ 0x42 #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 +#define IWL_CFG_MAC_TYPE_BZ 0x46 #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -477,6 +478,7 @@ extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; @@ -501,8 +503,10 @@ extern const char iwl_ax200_killer_1650w_name[]; extern const char iwl_ax200_killer_1650x_name[]; extern const char iwl_ax201_killer_1650s_name[]; extern const char iwl_ax201_killer_1650i_name[]; -extern const char iwl_ma_name[]; +extern const char iwl_ax210_killer_1675w_name[]; +extern const char iwl_ax210_killer_1675x_name[]; extern const char iwl_ax211_name[]; +extern const char iwl_ax221_name[]; extern const char iwl_ax411_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; @@ -594,7 +598,7 @@ extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg 
killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; -extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; +extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; @@ -612,6 +616,10 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; +extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0; +extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0; +extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0; +extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 6ccde7e30211..db312abd2e09 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -578,6 +578,9 @@ enum msix_fh_int_causes { MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), }; +/* The low 16 bits are for rx data queue indication */ +#define MSIX_FH_INT_CAUSES_DATA_QUEUE 0xffff + /* * Causes for the HW register interrupts */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 579bc81cc0ae..4cd8c39cc3e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <linux/firmware.h> #include "iwl-drv.h" @@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) const struct firmware *fw; int res; - if (!iwlwifi_mod_params.enable_ini) + if (!iwlwifi_mod_params.enable_ini || + trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000) return; res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index eb168dc535d4..884750bf7840 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -550,8 +550,6 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, return 0; } -#define FW_ADDR_CACHE_CONTROL 0xC0000000 - static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index c5a1e84dc1ab..fc75d049046d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -550,9 +550,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2, .mac_cap_info[4] = - 
IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU | + IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU | IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | @@ -583,11 +583,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, .phy_cap_info[6] = - IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = - IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP7_MAX_NC_1, .phy_cap_info[8] = @@ -636,9 +636,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP2_BSR, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2, .mac_cap_info[4] = - IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, .phy_cap_info[0] = diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index e6d2e0994317..cf9c64090014 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -176,6 +176,8 @@ iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state) static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { + if (WARN_ON_ONCE(!op_mode)) + return; op_mode->ops->free_skb(op_mode, skb); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 60e0db4a5e20..9236f9106826 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2020 Intel Corporation + * Copyright (C) 2019-2021 Intel Corporation */ #include <linux/kernel.h> #include <linux/bsearch.h> @@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans *trans; - int txcmd_size, txcmd_align; #ifdef CONFIG_LOCKDEP static struct lock_class_key __key; #endif @@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, return NULL; trans->trans_cfg = cfg_trans; - if (!cfg_trans->gen2) { + +#ifdef CONFIG_LOCKDEP + lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", + &__key, 0); +#endif + + trans->dev = dev; + trans->ops = ops; + trans->num_rx_queues = 1; + + WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); + + if (trans->trans_cfg->use_tfh) { + trans->txqs.tfd.addr_size = 64; + trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; + trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); + } else { + trans->txqs.tfd.addr_size = 36; + 
trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; + trans->txqs.tfd.size = sizeof(struct iwl_tfd); + } + trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); + + return trans; +} + +int iwl_trans_init(struct iwl_trans *trans) +{ + int txcmd_size, txcmd_align; + + if (!trans->trans_cfg->gen2) { txcmd_size = sizeof(struct iwl_tx_cmd); txcmd_align = sizeof(void *); - } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { txcmd_size = sizeof(struct iwl_tx_cmd_gen2); txcmd_align = 64; } else { @@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, txcmd_size += 36; /* biggest possible 802.11 header */ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ - if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) - return ERR_PTR(-EINVAL); - -#ifdef CONFIG_LOCKDEP - lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", - &__key, 0); -#endif - - trans->dev = dev; - trans->ops = ops; - trans->num_rx_queues = 1; + if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) + return -EINVAL; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); @@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, * allocate here. */ if (trans->trans_cfg->gen2) { - trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev, + trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev, trans->txqs.bc_tbl_size, 256, 0); if (!trans->txqs.bc_pool) - return NULL; + return -ENOMEM; } - if (trans->trans_cfg->use_tfh) { - trans->txqs.tfd.addr_size = 64; - trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); - } else { - trans->txqs.tfd.addr_size = 36; - trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfd); - } - trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); + /* Some things must not change even if the config does */ + WARN_ON(trans->txqs.tfd.addr_size != + (trans->trans_cfg->use_tfh ? 
64 : 36)); snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); @@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, txcmd_size, txcmd_align, SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) - return NULL; - - WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); + return -ENOMEM; trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans->txqs.tso_hdr_page) { kmem_cache_destroy(trans->dev_cmd_pool); - return NULL; + return -ENOMEM; } /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); - return trans; + return 0; } void iwl_trans_free(struct iwl_trans *trans) { int i; - for_each_possible_cpu(i) { - struct iwl_tso_hdr_page *p = - per_cpu_ptr(trans->txqs.tso_hdr_page, i); + if (trans->txqs.tso_hdr_page) { + for_each_possible_cpu(i) { + struct iwl_tso_hdr_page *p = + per_cpu_ptr(trans->txqs.tso_hdr_page, i); - if (p->page) - __free_page(p->page); - } + if (p && p->page) + __free_page(p->page); + } - free_percpu(trans->txqs.tso_hdr_page); + free_percpu(trans->txqs.tso_hdr_page); + } kmem_cache_destroy(trans->dev_cmd_pool); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 4a5822c1be13..bf569f856ad8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1267,7 +1267,8 @@ static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) return -ENOTSUPP; - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { + /* No need to wait if the firmware is not alive */ + if (trans->state != IWL_TRANS_FW_ALIVE) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } @@ -1438,6 +1439,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans); +int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 617b41ee5801..1343f25f1090 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -93,6 +93,15 @@ #define IWL_MVM_ENABLE_EBS 1 #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true +#define IWL_MVM_FTM_R2I_MAX_REP 7 +#define IWL_MVM_FTM_I2R_MAX_REP 7 +#define IWL_MVM_FTM_R2I_MAX_STS 1 +#define IWL_MVM_FTM_I2R_MAX_STS 1 +#define IWL_MVM_FTM_R2I_MAX_TOTAL_LTF 3 +#define IWL_MVM_FTM_I2R_MAX_TOTAL_LTF 3 +#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false +#define IWL_MVM_FTM_RESP_NDP_SUPPORT true +#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 @@ -108,5 +117,7 @@ #define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016 #define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2 #define IWL_MVM_DISABLE_AP_FILS false +#define 
IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ +#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index a7dc85c704a9..2e28cf299ef4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2028,6 +2028,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) mutex_lock(&mvm->mutex); + mvm->last_reset_or_resume_time_jiffies = jiffies; + /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 34ddef97b099..63d65018d098 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1210,10 +1210,10 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return -EINVAL; - rcu_read_lock(); + mutex_lock(&mvm->mutex); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { - vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, true); + vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); if (!vif) continue; @@ -1253,18 +1253,16 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) &beacon_cmd.tim_size, beacon->data, beacon->len); - mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); - rcu_read_unlock(); return 0; out_err: - rcu_read_unlock(); + mutex_unlock(&mvm->mutex); return -EINVAL; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index a4fd0bf9ba19..a456b8a0ae58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -490,6 +490,15 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->bss_conf.assoc && !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_sta *sta; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); + if (sta->mfp) + FTM_PUT_FLAG(PMF); + + rcu_read_unlock(); target->sta_id = mvmvif->ap_sta_id; } else { @@ -684,6 +693,19 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } } +static int +iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v7 *target) +{ + int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); + if (err) + return err; + + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + return err; +} + static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) @@ -704,11 +726,67 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i]; - err = iwl_mvm_ftm_put_target(mvm, 
vif, peer, (void *)target); + err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target); + if (err) + return err; + } + + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + +static void +iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, + struct iwl_tof_range_req_ap_entry_v8 *target) +{ + /* Only 2 STS are supported on Tx */ + u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 : + IWL_MVM_FTM_I2R_MAX_STS; + + target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | + (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS); + target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | + (i2r_max_sts << IWL_LOCATION_MAX_STS_POS); + target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF; + target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; +} + +static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v12 cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; + u32 flags; + + err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); if (err) return err; - iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + iwl_mvm_ftm_set_ndp_params(mvm, target); + + /* + * If secure LTF is turned off, replace the flag with PMF only + */ + flags = le32_to_cpu(target->initiator_ap_flags); + if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && + !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { + flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + flags |= IWL_INITIATOR_AP_FLAGS_PMF; + target->initiator_ap_flags = cpu_to_le32(flags); + } } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); @@ -732,6 +810,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 12: + err = iwl_mvm_ftm_start_v12(mvm, vif, req); + break; case 11: err = iwl_mvm_ftm_start_v11(mvm, vif, req); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index 996f45c19f10..5a249ea97eb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -75,6 +75,24 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, return 0; } +static void +iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, + struct iwl_tof_responder_config_cmd_v8 *cmd) +{ + /* Up to 2 R2I STS are allowed on the responder */ + u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? 
+ IWL_MVM_FTM_R2I_MAX_STS : 1; + + cmd->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | + (r2i_max_sts << IWL_RESPONDER_STS_POS) | + (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); + cmd->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | + (IWL_MVM_FTM_I2R_MAX_STS << IWL_RESPONDER_STS_POS) | + (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); + cmd->cmd_valid_fields |= + cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS); +} + static int iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -82,11 +100,11 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* - * The command structure is the same for versions 6 and 7, (only the + * The command structure is the same for versions 6, 7 and 8 (only the * field interpretation is different), so the same struct can be use * for all cases. */ - struct iwl_tof_responder_config_cmd cmd = { + struct iwl_tof_responder_config_cmd_v8 cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | @@ -100,7 +118,10 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (cmd_ver == 7) +if (cmd_ver == 8) + iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); + + if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, &cmd.ctrl_ch_position); else diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 5ee64f7f3c85..8aa5f1a2c58c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -29,6 +29,9 @@ #define UCODE_VALID_OK cpu_to_le32(0x1) +#define IWL_PPAG_MASK 3 +#define IWL_PPAG_ETSI_MASK BIT(0) + struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -70,56 +73,6 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } -static int iwl_configure_rxq(struct iwl_mvm *mvm) -{ - int i, num_queues, size, ret; - struct iwl_rfh_queue_config *cmd; - struct iwl_host_cmd hcmd = { - .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - - /* - * The default queue is configured via context info, so if we - * have a single queue, there's nothing to do here. 
- */ - if (mvm->trans->num_rx_queues == 1) - return 0; - - /* skip the default queue */ - num_queues = mvm->trans->num_rx_queues - 1; - - size = struct_size(cmd, data, num_queues); - - cmd = kzalloc(size, GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->num_queues = num_queues; - - for (i = 0; i < num_queues; i++) { - struct iwl_trans_rxq_dma_data data; - - cmd->data[i].q_num = i + 1; - iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data); - - cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); - cmd->data[i].urbd_stts_wrptr = - cpu_to_le64(data.urbd_stts_wrptr); - cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); - cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); - } - - hcmd.data[0] = cmd; - hcmd.len[0] = size; - - ret = iwl_mvm_send_cmd(mvm, &hcmd); - - kfree(cmd); - - return ret; -} - static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { @@ -233,7 +186,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, mvm->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); - umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); + umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & + ~FW_ADDR_CACHE_CONTROL; if (umac_error_table) { if (umac_error_table >= @@ -773,16 +727,16 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) } else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) { len = sizeof(cmd.v5); - n_subbands = IWL_NUM_SUB_BANDS; + n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v5.per_chain[0][0]; } else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) { len = sizeof(cmd.v4); - n_subbands = IWL_NUM_SUB_BANDS; + n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v4.per_chain[0][0]; } else { len = sizeof(cmd.v3); - n_subbands = IWL_NUM_SUB_BANDS; + n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v3.per_chain[0][0]; } @@ -909,46 +863,50 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) { - union acpi_object *wifi_pkg, *data, *enabled; + union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands; int idx = 2; s8 *gain; /* - * The 'enabled' field is the same in v1 and v2 so we can just + * The 'flags' field is the same in v1 and in v2 so we can just * use v1 to access it. 
*/ - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - /* try to read ppag table revision 1 */ + /* try to read ppag table rev 2 or 1 (both have the same data size) */ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { - if (tbl_rev != 1) { + if (tbl_rev == 1 || tbl_rev == 2) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = mvm->fwrt.ppag_table.v2.gain[0]; + mvm->fwrt.ppag_ver = tbl_rev; + IWL_DEBUG_RADIO(mvm, + "Reading PPAG table v2 (tbl_rev=%d)\n", + tbl_rev); + goto read_table; + } else { ret = -EINVAL; goto out_free; } - num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = mvm->fwrt.ppag_table.v2.gain[0]; - mvm->fwrt.ppag_ver = 2; - IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n"); - goto read_table; } /* try to read ppag table revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); + ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = -EINVAL; goto out_free; } - num_sub_bands = IWL_NUM_SUB_BANDS; + num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = mvm->fwrt.ppag_table.v1.gain[0]; - mvm->fwrt.ppag_ver = 1; + mvm->fwrt.ppag_ver = 0; IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n"); goto read_table; } @@ -956,15 +914,17 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; read_table: - enabled = &wifi_pkg->package.elements[1]; - if (enabled->type != ACPI_TYPE_INTEGER || - (enabled->integer.value != 0 && enabled->integer.value != 1)) { + flags = &wifi_pkg->package.elements[1]; + + if (flags->type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value); - if (!mvm->fwrt.ppag_table.v1.enabled) { + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value & + IWL_PPAG_MASK); + + if (!mvm->fwrt.ppag_table.v1.flags) { ret = 0; goto out_free; } @@ -992,12 +952,13 @@ read_table: (j != 0 && (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB || gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) { - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); ret = -EINVAL; goto out_free; } } } + ret = 0; out_free: kfree(data); @@ -1015,7 +976,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) "PPAG capability not supported by FW, command not sent.\n"); return 0; } - if (!mvm->fwrt.ppag_table.v1.enabled) { + if (!mvm->fwrt.ppag_table.v1.flags) { IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n"); return 0; } @@ -1024,20 +985,28 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) PER_PLATFORM_ANT_GAIN_CMD, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 1) { - num_sub_bands = IWL_NUM_SUB_BANDS; + num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = mvm->fwrt.ppag_table.v1.gain[0]; cmd_size = sizeof(mvm->fwrt.ppag_table.v1); - if (mvm->fwrt.ppag_ver == 2) { + if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) { IWL_DEBUG_RADIO(mvm, - "PPAG table is v2 but FW supports v1, sending truncated table\n"); + "PPAG table rev is %d but FW supports v1, sending truncated table\n", + mvm->fwrt.ppag_ver); + mvm->fwrt.ppag_table.v1.flags &= + cpu_to_le32(IWL_PPAG_ETSI_MASK); } - } else if (cmd_ver == 2) { + } else if (cmd_ver == 2 || cmd_ver == 3) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; gain = mvm->fwrt.ppag_table.v2.gain[0]; cmd_size = sizeof(mvm->fwrt.ppag_table.v2); - if 
(mvm->fwrt.ppag_ver == 1) { + if (mvm->fwrt.ppag_ver == 0) { IWL_DEBUG_RADIO(mvm, "PPAG table is v1 but FW supports v2, sending padded table\n"); + } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) { + IWL_DEBUG_RADIO(mvm, + "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); + mvm->fwrt.ppag_table.v1.flags &= + cpu_to_le32(IWL_PPAG_ETSI_MASK); } } else { IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n"); @@ -1102,7 +1071,7 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling PPAG.\n", dmi_get_system_info(DMI_SYS_VENDOR)); - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); return 0; } @@ -1144,33 +1113,6 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } -static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) -{ - u8 value; - - int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, - DSM_FUNC_ENABLE_INDONESIA_5G2, - &iwl_guid, &value); - - if (ret < 0) - IWL_DEBUG_RADIO(mvm, - "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n", - ret); - - else if (value >= DSM_VALUE_INDONESIA_MAX) - IWL_DEBUG_RADIO(mvm, - "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n", - value); - - else if (value == DSM_VALUE_INDONESIA_ENABLE) { - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n"); - return DSM_VALUE_INDONESIA_ENABLE; - } - /* default behaviour is disabled */ - return DSM_VALUE_INDONESIA_DISABLE; -} - static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { u8 value; @@ -1195,64 +1137,27 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) return DSM_VALUE_RFI_DISABLE; } -static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm) -{ - u8 value; - int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, - DSM_FUNC_DISABLE_SRD, - &iwl_guid, &value); - - if (ret < 0) - IWL_DEBUG_RADIO(mvm, - "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n", - ret); - - else if (value >= DSM_VALUE_SRD_MAX) - IWL_DEBUG_RADIO(mvm, - "DSM function DISABLE_SRD return invalid value, value=%d\n", - value); - - else if (value == DSM_VALUE_SRD_PASSIVE) { - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n"); - return DSM_VALUE_SRD_PASSIVE; - - } else if (value == DSM_VALUE_SRD_DISABLE) { - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function DISABLE_SRD: disabling SRD\n"); - return DSM_VALUE_SRD_DISABLE; - } - /* default behaviour is active */ - return DSM_VALUE_SRD_ACTIVE; -} - static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { - u8 ret; int cmd_ret; - struct iwl_lari_config_change_cmd_v2 cmd = {}; - - if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE) - cmd.config_bitmap |= - cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + struct iwl_lari_config_change_cmd_v3 cmd = {}; - ret = iwl_mvm_eval_dsm_disable_srd(mvm); - if (ret == DSM_VALUE_SRD_PASSIVE) - cmd.config_bitmap |= - cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); - - else if (ret == DSM_VALUE_SRD_DISABLE) - cmd.config_bitmap |= - cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); + cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); /* apply more config masks here */ if (cmd.config_bitmap) { - size_t cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, - REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE, 1) == 2 ? 
- sizeof(struct iwl_lari_config_change_cmd_v2) : - sizeof(struct iwl_lari_config_change_cmd_v1); + size_t cmd_size; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE, 1); + if (cmd_ver == 3) + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); + else if (cmd_ver == 2) + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); + else + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); + IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n", le32_to_cpu(cmd.config_bitmap)); @@ -1485,14 +1390,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* Init RSS configuration */ - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { - ret = iwl_configure_rxq(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to configure RX queues: %d\n", - ret); - goto error; - } - } + ret = iwl_configure_rxq(&mvm->fwrt); + if (ret) + goto error; if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index baf7404c137d..607d5d564928 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1099,6 +1099,8 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, NULL); + mvm->last_reset_or_resume_time_jiffies = jiffies; + if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below @@ -4610,6 +4612,16 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: + /* + * We haven't configured the firmware to be associated yet since + * we don't know the dtim period. In this case, the firmware can't + * track the beacons. + */ + if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { + ret = -EBUSY; + goto out_unlock; + } + if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, 0); @@ -5134,28 +5146,50 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, } void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, - struct iwl_mvm_internal_rxq_notif *notif, - u32 size) + enum iwl_mvm_rxq_notif_type type, + bool sync, + const void *data, u32 size) { - u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; + struct { + struct iwl_rxq_sync_cmd cmd; + struct iwl_mvm_internal_rxq_notif notif; + } __packed cmd = { + .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), + .cmd.count = + cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + + size), + .notif.type = type, + .notif.sync = sync, + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + .data[1] = data, + .len[1] = size, + .flags = sync ? 
0 : CMD_ASYNC, + }; int ret; + /* size must be a multiple of DWORD */ + if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) + return; if (!iwl_mvm_has_new_rx_api(mvm)) return; - if (notif->sync) { - notif->cookie = mvm->queue_sync_cookie; + if (sync) { + cmd.notif.cookie = mvm->queue_sync_cookie; mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; } - ret = iwl_mvm_notify_rx_queue(mvm, qmask, notif, size, !notif->sync); + ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; } - if (notif->sync) { + if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, READ_ONCE(mvm->queue_sync_state) == 0 || @@ -5167,21 +5201,18 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, } out: - mvm->queue_sync_state = 0; - if (notif->sync) + if (sync) { + mvm->queue_sync_state = 0; mvm->queue_sync_cookie++; + } } static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_internal_rxq_notif data = { - .type = IWL_MVM_RXQ_EMPTY, - .sync = 1, - }; mutex_lock(&mvm->mutex); - iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); mutex_unlock(&mvm->mutex); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0a963d01b825..4d9d4d6892fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -591,7 +591,6 @@ struct iwl_mvm_tcm { enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; - bool global_change; } result; }; @@ -1096,6 +1095,9 @@ struct iwl_mvm { /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; + + unsigned long last_6ghz_passive_scan_jiffies; + unsigned long last_reset_or_resume_time_jiffies; }; /* Extract MVM priv from op_mode and _hw */ @@ -1570,9 +1572,6 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); -int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, - const struct iwl_mvm_internal_rxq_notif *notif, - u32 notif_size, bool async); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); @@ -2001,8 +2000,9 @@ void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, - struct iwl_mvm_internal_rxq_notif *notif, - u32 size); + enum iwl_mvm_rxq_notif_type type, + bool sync, + const void *data, u32 size); void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 8772b65c9dab..2d58cb969918 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * 
Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, bool vht_ena = vht_cap->vht_supported; u16 flags = 0; + /* get STBC flags */ if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { - if (he_cap->has_he) { - if (he_cap->he_cap_elem.phy_cap_info[2] & - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) - flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; - - if (he_cap->he_cap_elem.phy_cap_info[7] & - IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) - flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK; - } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) || - (vht_ena && - (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) + if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 91b6541d579f..b97708cb869d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2014, 2018 - 2021 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * @@ -1926,9 +1926,7 @@ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (is_ht(rate)) return index == IWL_RATE_MCS_7_INDEX; if (is_vht(rate)) - return index == IWL_RATE_MCS_7_INDEX || - index == IWL_RATE_MCS_8_INDEX || - index == IWL_RATE_MCS_9_INDEX; + return index == IWL_RATE_MCS_9_INDEX; WARN_ON_ONCE(1); return false; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index af5a6dd81c41..8e26422ca326 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -527,37 +527,6 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; } -int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, - const struct iwl_mvm_internal_rxq_notif *notif, - u32 notif_size, bool async) -{ - u8 buf[sizeof(struct iwl_rxq_sync_cmd) + - sizeof(struct iwl_mvm_rss_sync_notif)]; - struct iwl_rxq_sync_cmd *cmd = (void *)buf; - u32 data_size = sizeof(*cmd) + notif_size; - int ret; - - /* - * size must be a multiple of DWORD - * Ensure we don't overflow buf - */ - if (WARN_ON(notif_size & 3 || - notif_size > sizeof(struct iwl_mvm_rss_sync_notif))) - return -EINVAL; - - cmd->rxq_mask = cpu_to_le32(rxq_mask); - cmd->count = cpu_to_le32(notif_size); - cmd->flags = 0; - memcpy(cmd->payload, notif, notif_size); - - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(DATA_PATH_GROUP, - TRIGGER_RX_QUEUES_NOTIF_CMD), - async ? CMD_ASYNC : 0, data_size, cmd); - - return ret; -} - /* * Returns true if sn2 - buffer_size < sn1 < sn2. * To be used only in order to compare reorder buffer head with NSSN. 
@@ -573,15 +542,13 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { if (IWL_MVM_USE_NSSN_SYNC) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, - .metadata.sync = 0, - .nssn_sync.baid = baid, - .nssn_sync.nssn = nssn, + struct iwl_mvm_nssn_sync_data notif = { + .baid = baid, + .nssn = nssn, }; - iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, - sizeof(notif)); + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false, + ¬if, sizeof(notif)); } } @@ -830,8 +797,7 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, "invalid notification size %d (%d)", len, (int)(sizeof(*notif) + sizeof(*internal_notif)))) return; - /* remove only the firmware header, we want all of our payload below */ - len -= sizeof(*notif); + len -= sizeof(*notif) + sizeof(*internal_notif); if (internal_notif->sync && mvm->queue_sync_cookie != internal_notif->cookie) { @@ -841,21 +807,19 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, switch (internal_notif->type) { case IWL_MVM_RXQ_EMPTY: - WARN_ONCE(len != sizeof(*internal_notif), - "invalid empty notification size %d (%d)", - len, (int)sizeof(*internal_notif)); + WARN_ONCE(len, "invalid empty notification size %d", len); break; case IWL_MVM_RXQ_NOTIF_DEL_BA: - if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif), + if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data), "invalid delba notification size %d (%d)", - len, (int)sizeof(struct iwl_mvm_rss_sync_notif))) + len, (int)sizeof(struct iwl_mvm_delba_data))) break; iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; case IWL_MVM_RXQ_NSSN_SYNC: - if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif), + if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data), "invalid nssn sync notification size %d (%d)", - len, (int)sizeof(struct iwl_mvm_rss_sync_notif))) + len, (int)sizeof(struct iwl_mvm_nssn_sync_data))) break; iwl_mvm_nssn_sync(mvm, napi, queue, (void *)internal_notif->data); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index caf87f320094..5a0696c44f6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -43,6 +43,9 @@ /* adaptive dwell number of APs override for social channels */ #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2 +/* minimal number of 2GHz and 5GHz channels in the regular scan request */ +#define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4 + struct iwl_mvm_scan_timing_params { u32 suspend_time; u32 max_out_time; @@ -94,6 +97,7 @@ struct iwl_mvm_scan_params { struct cfg80211_scan_6ghz_params *scan_6ghz_params; u32 n_6ghz_params; bool scan_6ghz; + bool enable_6ghz_passive; }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) @@ -1873,6 +1877,98 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, return flags; } +static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband = + &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; + u32 n_disabled, i; + + params->enable_6ghz_passive = false; + + if (params->scan_6ghz) + return; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: Not supported by FW\n"); + return; + } + + /* 6GHz passive scan allowed only on 
station interface */ + if (vif->type != NL80211_IFTYPE_STATION) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: not station interface\n"); + return; + } + + /* + * 6GHz passive scan is allowed while associated in a defined time + * interval following HW reset or resume flow + */ + if (vif->bss_conf.assoc && + (time_before(mvm->last_reset_or_resume_time_jiffies + + (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ), + jiffies))) { + IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n"); + return; + } + + /* No need for 6GHz passive scan if not enough time elapsed */ + if (time_after(mvm->last_6ghz_passive_scan_jiffies + + (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: timeout did not expire\n"); + return; + } + + /* not enough channels in the regular scan request */ + if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: not enough channels\n"); + return; + } + + for (i = 0; i < params->n_ssids; i++) { + if (!params->ssids[i].ssid_len) + break; + } + + /* not a wildcard scan, so cannot enable passive 6GHz scan */ + if (i == params->n_ssids) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: no wildcard SSID\n"); + return; + } + + if (!sband || !sband->n_channels) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: no 6GHz channels\n"); + return; + } + + for (i = 0, n_disabled = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED)) + n_disabled++; + } + + /* + * Not all the 6GHz channels are disabled, so no need for 6GHz passive + * scan + */ + if (n_disabled != sband->n_channels) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: 6GHz channels enabled\n"); + return; + } + + /* all conditions to enable 6ghz passive scan are satisfied */ + IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n"); + params->enable_6ghz_passive = true; +} + static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, @@ -1911,6 +2007,9 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN; + if (params->enable_6ghz_passive) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN; + return flags; } @@ -2183,6 +2282,30 @@ iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm, params->n_channels, channel_cfg_flags, vif->type); + + if (params->enable_6ghz_passive) { + struct ieee80211_supported_band *sband = + &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; + u32 i; + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *channel = + &sband->channels[i]; + + struct iwl_scan_channel_cfg_umac *cfg = + &cp->channel_config[cp->count]; + + if (!cfg80211_channel_is_psc(channel)) + continue; + + cfg->flags = 0; + cfg->v2.channel_num = channel->hw_value; + cfg->v2.band = PHY_BAND_6; + cfg->v2.iter_count = 1; + cfg->v2.iter_interval = 0; + cp->count++; + } + } } static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -2500,6 +2623,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); + iwl_mvm_scan_6ghz_passive_scan(mvm, ¶ms, vif); + uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, IWL_MVM_SCAN_REGULAR); @@ -2524,6 +2649,9 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_status |= IWL_MVM_SCAN_REGULAR; mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); + if (params.enable_6ghz_passive) + 
mvm->last_6ghz_passive_scan_jiffies = jiffies; + schedule_delayed_work(&mvm->scan_timeout_dwork, msecs_to_jiffies(SCAN_TIMEOUT)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 3a411bbda5fd..f618368eda83 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2441,12 +2441,12 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, - .metadata.sync = 1, - .delba.baid = baid, + struct iwl_mvm_delba_data notif = { + .baid = baid, }; - iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); + + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, + ¬if, sizeof(notif)); }; static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 35a18b96aac5..32b4d1935788 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -281,6 +281,36 @@ struct iwl_mvm_key_pn { } ____cacheline_aligned_in_smp q[]; }; +/** + * enum iwl_mvm_rxq_notif_type - Internal message identifier + * + * @IWL_MVM_RXQ_EMPTY: empty sync notification + * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA + * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN + */ +enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_EMPTY, + IWL_MVM_RXQ_NOTIF_DEL_BA, + IWL_MVM_RXQ_NSSN_SYNC, +}; + +/** + * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent + * in &iwl_rxq_sync_cmd. Should be DWORD aligned. + * FW is agnostic to the payload, so there are no endianity requirements. 
+ * + * @type: value from &iwl_mvm_rxq_notif_type + * @sync: ctrl path is waiting for all notifications to be received + * @cookie: internal cookie to identify old notifications + * @data: payload + */ +struct iwl_mvm_internal_rxq_notif { + u16 type; + u16 sync; + u32 cookie; + u8 data[]; +} __packed; + struct iwl_mvm_delba_data { u32 baid; } __packed; @@ -290,14 +320,6 @@ struct iwl_mvm_nssn_sync_data { u32 nssn; } __packed; -struct iwl_mvm_rss_sync_notif { - struct iwl_mvm_internal_rxq_notif metadata; - union { - struct iwl_mvm_delba_data delba; - struct iwl_mvm_nssn_sync_data nssn_sync; - }; -} __packed; - /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 0b012f8c9eb2..83342a6a6d5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -151,6 +151,16 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); + if (mvmvif->csa_bcn_pending) { + struct iwl_mvm_sta *mvmsta; + + rcu_read_lock(); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + if (!WARN_ON(!mvmsta)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + rcu_read_unlock(); + } + iwl_mvm_connection_loss(mvm, vif, errmsg); return true; } @@ -285,6 +295,17 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, break; case NL80211_IFTYPE_STATION: /* + * If we are switching channel, don't disconnect + * if the time event is already done. Beacons can + * be delayed a bit after the switch. + */ + if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { + IWL_DEBUG_TE(mvm, + "No beacon heard and the CS time event is over, don't disconnect\n"); + break; + } + + /* * By now, we should have finished association * and know the dtim period. 
*/ @@ -713,8 +734,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(time_cmd), &time_cmd); - if (WARN_ON(ret)) - return; + if (ret) + IWL_ERR(mvm, "Couldn't remove the time event\n"); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b6b481ff1518..c566be99a4c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1030,15 +1030,9 @@ iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) return IWL_MVM_TRAFFIC_LOW; } -struct iwl_mvm_tcm_iter_data { - struct iwl_mvm *mvm; - bool any_sent; -}; - static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { - struct iwl_mvm_tcm_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; + struct iwl_mvm *mvm = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC; @@ -1060,22 +1054,15 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) } else { iwl_mvm_update_quotas(mvm, false, NULL); } - - data->any_sent = true; } static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) { - struct iwl_mvm_tcm_iter_data data = { - .mvm = mvm, - .any_sent = false, - }; - mutex_lock(&mvm->mutex); ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_tcm_iter, &data); + iwl_mvm_tcm_iter, mvm); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); @@ -1257,7 +1244,6 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, } load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); - mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; mvm->tcm.result.global_load = load; for (i = 0; i < NUM_NL80211_BANDS; i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 558a0b2ef0fc..d94bd8d732e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -17,10 +17,20 @@ #include "iwl-prph.h" #include "internal.h" +#define TRANS_CFG_MARKER BIT(0) +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \ + struct _struct) +extern int _invalid_type; +#define _TRANS_CFG_MARKER(cfg) \ + (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \ + TRANS_CFG_MARKER, \ + __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) + #define IWL_PCI_DEVICE(dev, subdev, cfg) \ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ - .driver_data = (kernel_ulong_t)&(cfg) + .driver_data = _ASSIGN_CFG(cfg) /* Hardware specific file defines the PCI IDs table for that hardware module */ static const struct pci_device_id iwl_hw_card_ids[] = { @@ -490,6 +500,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, +/* Bz devices */ + 
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -607,6 +619,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name), + IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name), IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL), @@ -1014,12 +1028,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma_a0_mr_a0, iwl_ma_name), + iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_snj_a0_mr_a0, iwl_ma_name), + iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1067,6 +1081,35 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), +/* Bz */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_hr_b0, iwl_ax201_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, + iwl_cfg_bz_a0_gf4_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), + +/* So with GF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name) + #endif /* CONFIG_IWLMVM */ }; @@ -1075,19 +1118,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - const struct iwl_cfg_trans_params *trans = - (struct iwl_cfg_trans_params *)(ent->driver_data); + const struct iwl_cfg_trans_params *trans; const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int i, ret; + const struct iwl_cfg *cfg; + + trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); + /* * This is needed for backwards compatibility with the old * tables, so we don't need to change all the config structs * at the same time. The cfg is used to compare with the old * full cfg structs. 
*/ - const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); /* make sure trans is the first element in iwl_cfg */ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); @@ -1165,7 +1211,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { - iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0; + iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0; @@ -1202,11 +1248,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* - * If we didn't set the cfg yet, assume the trans is actually - * a full cfg from the old tables. + * If we didn't set the cfg yet, the PCI ID table entry should have + * been a full config - if yes, use it, otherwise fail. */ - if (!iwl_trans->cfg) + if (!iwl_trans->cfg) { + if (ent->driver_data & TRANS_CFG_MARKER) { + pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", + pdev->device, pdev->subsystem_device, + iwl_trans->hw_rev, iwl_trans->hw_rf_id); + ret = -EINVAL; + goto out_free_trans; + } iwl_trans->cfg = cfg; + } /* if we don't have a name yet, copy name from the old cfg */ if (!iwl_trans->name) @@ -1222,6 +1276,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } + ret = iwl_trans_init(iwl_trans); + if (ret) + goto out_free_trans; + pci_set_drvdata(pdev, iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index d9688c7bed07..76a512cd2e5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -447,6 +447,11 @@ struct iwl_trans const struct iwl_cfg_trans_params *cfg_trans); void iwl_trans_pcie_free(struct iwl_trans *trans); +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); +#define _iwl_trans_pcie_grab_nic_access(trans) \ + __cond_lock(nic_access_nobh, \ + likely(__iwl_trans_pcie_grab_nic_access(trans))) + /***************************************************** * RX ******************************************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 2bec97133119..fb8491412be4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1023,6 +1023,9 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) ret = iwl_pcie_rx_handle(trans, rxq->id, budget); + IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", + rxq->id, ret, budget); + if (ret < budget) { spin_lock(&trans_pcie->irq_lock); if (test_bit(STATUS_INT_ENABLED, &trans->status)) @@ -1046,33 +1049,19 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); + 
IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, + budget); if (ret < budget) { - spin_lock(&trans_pcie->irq_lock); - iwl_pcie_clear_irq(trans, rxq->id); - spin_unlock(&trans_pcie->irq_lock); + int irq_line = rxq->id; - napi_complete_done(&rxq->napi, ret); - } + /* FIRST_RSS is shared with line 0 */ + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && + rxq->id == 1) + irq_line = 0; - return ret; -} - -static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget) -{ - struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); - struct iwl_trans_pcie *trans_pcie; - struct iwl_trans *trans; - int ret; - - trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); - trans = trans_pcie->trans; - - ret = iwl_pcie_rx_handle(trans, rxq->id, budget); - - if (ret < budget) { spin_lock(&trans_pcie->irq_lock); - iwl_pcie_clear_irq(trans, 0); + iwl_pcie_clear_irq(trans, irq_line); spin_unlock(&trans_pcie->irq_lock); napi_complete_done(&rxq->napi, ret); @@ -1134,18 +1123,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) if (!rxq->napi.poll) { int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; - if (trans_pcie->msix_enabled) { + if (trans_pcie->msix_enabled) poll = iwl_pcie_napi_poll_msix; - if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX && - i == 0) - poll = iwl_pcie_napi_poll_msix_shared; - - if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && - i == 1) - poll = iwl_pcie_napi_poll_msix_shared; - } - netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, poll, NAPI_POLL_WEIGHT); napi_enable(&rxq->napi); @@ -1659,10 +1639,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) if (WARN_ON(entry->entry >= trans->num_rx_queues)) return IRQ_NONE; - if (WARN_ONCE(!rxq, "Got MSI-X interrupt before we have Rx queues")) + if (WARN_ONCE(!rxq, + "[%d] Got MSI-X interrupt before we have Rx queues", + entry->entry)) return IRQ_NONE; lock_map_acquire(&trans->sync_cmd_lockdep_map); + IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); local_bh_disable(); if (napi_schedule_prep(&rxq->napi)) @@ -2194,9 +2177,16 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; u32 inta_fh, inta_hw; bool polling = false; + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) + inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) + inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1; + lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock_bh(&trans_pcie->irq_lock); @@ -2205,7 +2195,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* * Clear causes registers to avoid being handling the same cause. 
*/ - iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); + iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk); iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); spin_unlock_bh(&trans_pcie->irq_lock); @@ -2219,8 +2209,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, - "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", - inta_fh, trans_pcie->fh_mask, + "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + entry->entry, inta_fh, trans_pcie->fh_mask, iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); if (inta_fh & ~trans_pcie->fh_mask) IWL_DEBUG_ISR(trans, @@ -2275,8 +2265,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* After checking FH register check HW register */ if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, - "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", - inta_hw, trans_pcie->hw_mask, + "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + entry->entry, inta_hw, trans_pcie->hw_mask, iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); if (inta_hw & ~trans_pcie->hw_mask) IWL_DEBUG_ISR(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 94ffc1ae484d..1bcd36e9e008 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "iwl-trans.h" #include "iwl-prph.h" @@ -108,8 +108,8 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) ret = wait_event_timeout(trans_pcie->fw_reset_waitq, trans_pcie->fw_reset_done, FW_RESET_TIMEOUT); if (!ret) - IWL_ERR(trans, - "firmware didn't ACK the reset - continue anyway\n"); + IWL_INFO(trans, + "firmware didn't ACK the reset - continue anyway\n"); } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) @@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); - iwl_txq_gen2_tx_stop(trans); + iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 1bf4c37fe960..239bc177a3e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1604,6 +1604,11 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, } else { trans_pcie->trans->num_rx_queues = num_irqs - 1; } + + IWL_DEBUG_INFO(trans, + "MSI-X enabled with rx queues %d, vec mask 0x%x\n", + trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); + WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; @@ -1973,12 +1978,16 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) module_put(THIS_MODULE); } -static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +/* + * This version doesn't disable BHs but rather assumes they're + * already disabled. 
+ */ +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - spin_lock_bh(&trans_pcie->reg_lock); + spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; @@ -2063,7 +2072,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) } err: - spin_unlock_bh(&trans_pcie->reg_lock); + spin_unlock(&trans_pcie->reg_lock); return false; } @@ -2076,6 +2085,20 @@ out: return true; } +static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +{ + bool ret; + + local_bh_disable(); + ret = __iwl_trans_pcie_grab_nic_access(trans); + if (ret) { + /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ + return ret; + } + local_bh_enable(); + return false; +} + static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 4456abb9a074..34bde8c87324 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_tfh_tfd *tfd; + unsigned long flags; copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); @@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - spin_lock_bh(&txq->lock); + spin_lock_irqsave(&txq->lock, flags); idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); @@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_unlock(&trans_pcie->reg_lock); out: - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags); free_dup_buf: if (idx < 0) kfree(dup_buf); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 7ae32491b5da..4f6c187eed69 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -181,16 +181,20 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - lockdep_assert_held(&trans_pcie->reg_lock); - if (!trans->trans_cfg->base_params->apmg_wake_up_wa) return; - if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + + spin_lock(&trans_pcie->reg_lock); + + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { + spin_unlock(&trans_pcie->reg_lock); return; + } trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock(&trans_pcie->reg_lock); } /* @@ -198,7 +202,6 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) */ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; if (!txq) { @@ -222,12 +225,9 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_txq_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); - if (txq->read_ptr == txq->write_ptr) { - spin_lock(&trans_pcie->reg_lock); - if (txq_id == trans->txqs.cmd.q_id) - iwl_pcie_clear_cmd_in_flight(trans); - spin_unlock(&trans_pcie->reg_lock); - } + if (txq->read_ptr == txq->write_ptr && + txq_id == trans->txqs.cmd.q_id) + iwl_pcie_clear_cmd_in_flight(trans); } while (!skb_queue_empty(&txq->overflow_q)) { @@ -629,38 +629,30 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - lockdep_assert_held(&trans_pcie->reg_lock); /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) + return 0; + /* * wake up the NIC to make sure that the firmware will see the host * command - we will let the NIC sleep once all the host commands * returned. This needs to be done only on NICs that have - * apmg_wake_up_wa set. + * apmg_wake_up_wa set (see above.) 
*/ - if (trans->trans_cfg->base_params->apmg_wake_up_wa && - !trans_pcie->cmd_hold_nic_awake) { - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), - 15000); - if (ret < 0) { - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); - return -EIO; - } - trans_pcie->cmd_hold_nic_awake = true; - } + if (!_iwl_trans_pcie_grab_nic_access(trans)) + return -EIO; + + /* + * In iwl_trans_grab_nic_access(), we've acquired the reg_lock. + * There, we also returned immediately if cmd_hold_nic_awake is + * already true, so it's OK to unconditionally set it to true. + */ + trans_pcie->cmd_hold_nic_awake = true; + spin_unlock(&trans_pcie->reg_lock); return 0; } @@ -674,7 +666,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, */ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; int nfreed = 0; u16 r; @@ -705,12 +696,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) } } - if (txq->read_ptr == txq->write_ptr) { - /* BHs are also disabled due to txq->lock */ - spin_lock(&trans_pcie->reg_lock); + if (txq->read_ptr == txq->write_ptr) iwl_pcie_clear_cmd_in_flight(trans); - spin_unlock(&trans_pcie->reg_lock); - } iwl_txq_progress(txq); } @@ -914,7 +901,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; @@ -1161,19 +1147,16 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - spin_lock(&trans_pcie->reg_lock); ret = iwl_pcie_set_cmd_in_flight(trans, cmd); if (ret < 0) { idx = ret; - goto unlock_reg; + goto out; } /* Increment and update queue's write index */ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); - unlock_reg: - spin_unlock(&trans_pcie->reg_lock); out: spin_unlock_irqrestore(&txq->lock, flags); free_dup_buf: @@ -1367,7 +1350,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); - struct sk_buff *csum_skb = NULL; unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; u8 *subf_hdrs_start = hdr_page->pos; @@ -1398,10 +1380,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, hdr_tb_len = hdr_page->pos - start_hdr; hdr_tb_phys = dma_map_single(trans->dev, start_hdr, hdr_tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { - dev_kfree_skb(csum_skb); + if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) return -EINVAL; - } iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, @@ -1420,10 +1400,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, tb_phys = 
dma_map_single(trans->dev, tso.data, size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - dev_kfree_skb(csum_skb); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; - } iwl_pcie_txq_build_tfd(trans, txq, tb_phys, size, false); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 833f43d1ca7a..451b06069350 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020-2021 Intel Corporation */ #include <net/tso.h> #include <linux/tcp.h> @@ -14,30 +14,6 @@ #include <linux/dmapool.h> /* - * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels - */ -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans) -{ - int txq_id; - - /* - * This function can be called before the op_mode disabled the - * queues. This happens when we have an rfkill interrupt. - * Since we stop Tx altogether - mark the queues as stopped. - */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { - if (!trans->txqs.txq[txq_id]) - continue; - iwl_txq_gen2_unmap(trans, txq_id); - } -} - -/* * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array */ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, @@ -399,7 +375,6 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); - struct sk_buff *csum_skb = NULL; unsigned int tb_len; dma_addr_t tb_phys; u8 *subf_hdrs_start = hdr_page->pos; @@ -430,10 +405,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, tb_len = hdr_page->pos - start_hdr; tb_phys = dma_map_single(trans->dev, start_hdr, tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - dev_kfree_skb(csum_skb); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; - } /* * No need for _with_wa, this is from the TSO page and * we leave some space at the end of it so can't hit @@ -458,10 +431,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, tso.data, tb_len, NULL); - if (ret) { - dev_kfree_skb(csum_skb); + if (ret) goto out_err; - } data_left -= tb_len; tso_build_data(skb, &tso, tb_len); @@ -1189,6 +1160,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, goto error_free_resp; } + if (WARN_ONCE(trans->txqs.txq[qid], + "queue %d already allocated\n", qid)) { + ret = -EIO; + goto error_free_resp; + } + txq->id = qid; trans->txqs.txq[qid] = txq; wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index af1dbdf5617a..20efc62acf13 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020-2021 Intel Corporation */ #ifndef __iwl_trans_queue_tx_h__ #define __iwl_trans_queue_tx_h__ @@ -123,7 +123,6 @@ int 
iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans); void iwl_txq_gen2_tx_free(struct iwl_trans *trans); int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index fa7d4c20dc13..51ce767eaf88 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -596,7 +596,7 @@ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { { .vendor_id = OUI_QCA, .subcmd = 1 }, }; -static spinlock_t hwsim_radio_lock; +static DEFINE_SPINLOCK(hwsim_radio_lock); static LIST_HEAD(hwsim_radios); static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; @@ -763,7 +763,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { /* MAC80211_HWSIM virtio queues */ static struct virtqueue *hwsim_vqs[HWSIM_NUM_VQS]; static bool hwsim_virtio_enabled; -static spinlock_t hwsim_virtio_lock; +static DEFINE_SPINLOCK(hwsim_virtio_lock); static void hwsim_virtio_rx_work(struct work_struct *work); static DECLARE_WORK(hwsim_virtio_rx, hwsim_virtio_rx_work); @@ -2795,8 +2795,8 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = { IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | @@ -2839,8 +2839,8 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = { IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | @@ -2885,8 +2885,8 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = { IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | @@ -2933,8 +2933,8 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = { IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | @@ -4410,8 +4410,6 @@ static struct virtio_driver virtio_hwsim = { static int 
hwsim_register_virtio_driver(void) { - spin_lock_init(&hwsim_virtio_lock); - return register_virtio_driver(&virtio_hwsim); } @@ -4440,8 +4438,6 @@ static int __init init_mac80211_hwsim(void) if (channels < 1) return -EINVAL; - spin_lock_init(&hwsim_radio_lock); - err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); if (err) return err; diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h index 5d1e30e0c5db..c1e0388ef01d 100644 --- a/drivers/net/wireless/marvell/libertas/decl.h +++ b/drivers/net/wireless/marvell/libertas/decl.h @@ -23,7 +23,6 @@ struct lbs_private; typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret, const struct firmware *helper, const struct firmware *mainfw); -struct lbs_private; struct sk_buff; struct net_device; struct cmd_ds_command; diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h index d49717b20c09..44c4cd0230a8 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.h +++ b/drivers/net/wireless/marvell/libertas/mesh.h @@ -60,13 +60,13 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev, #else -#define lbs_init_mesh(priv) -#define lbs_deinit_mesh(priv) -#define lbs_start_mesh(priv) -#define lbs_add_mesh(priv) -#define lbs_remove_mesh(priv) +#define lbs_init_mesh(priv) do { } while (0) +#define lbs_deinit_mesh(priv) do { } while (0) +#define lbs_start_mesh(priv) do { } while (0) +#define lbs_add_mesh(priv) do { } while (0) +#define lbs_remove_mesh(priv) do { } while (0) #define lbs_mesh_set_dev(priv, dev, rxpd) (dev) -#define lbs_mesh_set_txpd(priv, dev, txpd) +#define lbs_mesh_set_txpd(priv, dev, txpd) do { } while (0) #define lbs_mesh_set_channel(priv, channel) (0) #define lbs_mesh_activated(priv) (false) diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h index 67bbb6a8f113..5d726545d987 100644 --- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h @@ -453,7 +453,6 @@ struct cmd_ds_802_11_beacon_set { u8 beacon[MRVL_MAX_BCN_SIZE]; }; -struct lbtf_private; struct cmd_ctrl_node; /** Function Prototype Declaration */ diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index a2ed268ce0da..0961f4a5e415 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2300,8 +2300,7 @@ done: is_scanning_required = 1; } else { mwifiex_dbg(priv->adapter, MSG, - "info: trying to associate to '%.*s' bssid %pM\n", - req_ssid.ssid_len, (char *)req_ssid.ssid, + "info: trying to associate to bssid %pM\n", bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; @@ -2378,8 +2377,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(adapter, INFO, - "info: Trying to associate to %.*s and bssid %pM\n", - (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); + "info: Trying to associate to bssid %pM\n", sme->bssid); if (!mwifiex_stop_bg_scan(priv)) cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0); @@ -2512,9 +2510,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, goto done; } - mwifiex_dbg(priv->adapter, MSG, - "info: trying to join to %.*s and bssid %pM\n", - params->ssid_len, (char *)params->ssid, params->bssid); + mwifiex_dbg(priv->adapter, MSG, "info: trying to join to bssid %pM\n", + params->bssid); 
mwifiex_set_ibss_params(priv, params); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index c2a685f63e95..0b877f3f6b97 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1211,7 +1211,6 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, struct mwifiex_bssdescriptor *bss_entry) { - int ret = 0; u8 element_id; struct ieee_types_fh_param_set *fh_param_set; struct ieee_types_ds_param_set *ds_param_set; @@ -1464,7 +1463,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, bytes_left -= total_ie_len; } /* while (bytes_left > 2) */ - return ret; + return 0; } /* diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index c9f8c056aa51..84b32a5f01ee 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) if (txq->skb == NULL) { dma_free_coherent(&priv->pdev->dev, size, txq->txd, txq->txd_dma); + txq->txd = NULL; return -ENOMEM; } diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index df25c00d9e06..72622220051b 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -76,9 +76,9 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames) nframes--; status = (struct mt76_rx_status *)skb->cb; - if (!time_after(jiffies, - status->reorder_time + - mt76_aggr_tid_to_timeo(tid->num))) + if (!time_after32(jiffies, + status->reorder_time + + mt76_aggr_tid_to_timeo(tid->num))) continue; mt76_rx_aggr_release_frames(tid, frames, status->seqno); @@ -122,6 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) struct ieee80211_bar *bar = mt76_skb_get_hdr(skb); struct mt76_wcid *wcid = status->wcid; struct mt76_rx_tid *tid; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; u16 seqno; if (!ieee80211_is_ctl(bar->frame_control)) @@ -130,9 +131,9 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) if (!ieee80211_is_back_req(bar->frame_control)) return; - status->tid = le16_to_cpu(bar->control) >> 12; + status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12; seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); - tid = rcu_dereference(wcid->aggr[status->tid]); + tid = rcu_dereference(wcid->aggr[tidno]); if (!tid) return; @@ -147,12 +148,12 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_wcid *wcid = status->wcid; struct ieee80211_sta *sta; struct mt76_rx_tid *tid; bool sn_less; u16 seqno, head, size, idx; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; u8 ackp; __skb_queue_tail(frames, skb); @@ -161,18 +162,18 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) if (!sta) return; - if (!status->aggr) { + if (!status->aggr && !(status->flag & RX_FLAG_8023)) { mt76_rx_aggr_check_ctl(skb, frames); return; } /* not part of a BA session */ - ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK; + ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK; if 
(ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) return; - tid = rcu_dereference(wcid->aggr[status->tid]); + tid = rcu_dereference(wcid->aggr[tidno]); if (!tid) return; diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index d4a6b8108971..fa48cc3a7a8f 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -25,6 +25,32 @@ mt76_reg_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); +static int +mt76_napi_threaded_set(void *data, u64 val) +{ + struct mt76_dev *dev = data; + + if (!mt76_is_mmio(dev)) + return -EOPNOTSUPP; + + if (dev->napi_dev.threaded != val) + return dev_set_threaded(&dev->napi_dev, val); + + return 0; +} + +static int +mt76_napi_threaded_get(void *data, u64 *val) +{ + struct mt76_dev *dev = data; + + *val = dev->napi_dev.threaded; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_napi_threaded, mt76_napi_threaded_get, + mt76_napi_threaded_set, "%llu\n"); + int mt76_queues_read(struct seq_file *s, void *data) { struct mt76_dev *dev = dev_get_drvdata(s->private); @@ -102,6 +128,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev) debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); debugfs_create_file_unsafe("regval", 0600, dir, dev, &fops_regval); + debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev, + &fops_napi_threaded); debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom); if (dev->otp.data) debugfs_create_blob("otp", 0400, dir, &dev->otp); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 2f27c43ad76d..72b1cc0ecfda 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -79,13 +79,38 @@ mt76_free_pending_txwi(struct mt76_dev *dev) local_bh_enable(); } +static void +mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) +{ + writel(q->desc_dma, &q->regs->desc_base); + writel(q->ndesc, &q->regs->ring_size); + q->head = readl(&q->regs->dma_idx); + q->tail = q->head; +} + +static void +mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) +{ + int i; + + if (!q) + return; + + /* clear descriptors */ + for (i = 0; i < q->ndesc; i++) + q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); + + writel(0, &q->regs->cpu_idx); + writel(0, &q->regs->dma_idx); + mt76_dma_sync_idx(dev, q); +} + static int mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, int idx, int n_desc, int bufsize, u32 ring_base) { int size; - int i; spin_lock_init(&q->lock); spin_lock_init(&q->cleanup_lock); @@ -105,14 +130,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, if (!q->entry) return -ENOMEM; - /* clear descriptors */ - for (i = 0; i < q->ndesc; i++) - q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); - - writel(q->desc_dma, &q->regs->desc_base); - writel(0, &q->regs->cpu_idx); - writel(0, &q->regs->dma_idx); - writel(q->ndesc, &q->regs->ring_size); + mt76_dma_queue_reset(dev, q); return 0; } @@ -202,15 +220,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, } static void -mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) -{ - writel(q->desc_dma, &q->regs->desc_base); - writel(q->ndesc, &q->regs->ring_size); - q->head = readl(&q->regs->dma_idx); - q->tail = q->head; -} - -static void mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) { wmb(); @@ -309,7 
+318,7 @@ static int mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, u32 tx_info) { - struct mt76_queue_buf buf; + struct mt76_queue_buf buf = {}; dma_addr_t addr; if (q->queued + 1 >= q->ndesc - 1) @@ -593,8 +602,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) return done; } -static int -mt76_dma_rx_poll(struct napi_struct *napi, int budget) +int mt76_dma_rx_poll(struct napi_struct *napi, int budget) { struct mt76_dev *dev; int qid, done = 0, cur; @@ -602,7 +610,6 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget) dev = container_of(napi->dev, struct mt76_dev, napi_dev); qid = napi - dev->napi; - local_bh_disable(); rcu_read_lock(); do { @@ -612,24 +619,28 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget) } while (cur && done < budget); rcu_read_unlock(); - local_bh_enable(); if (done < budget && napi_complete(napi)) dev->drv->rx_poll_complete(dev, qid); return done; } +EXPORT_SYMBOL_GPL(mt76_dma_rx_poll); static int -mt76_dma_init(struct mt76_dev *dev) +mt76_dma_init(struct mt76_dev *dev, + int (*poll)(struct napi_struct *napi, int budget)) { int i; init_dummy_netdev(&dev->napi_dev); + init_dummy_netdev(&dev->tx_napi_dev); + snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s", + wiphy_name(dev->hw->wiphy)); + dev->napi_dev.threaded = 1; mt76_for_each_q_rx(dev, i) { - netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, - 64); + netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64); mt76_dma_rx_fill(dev, &dev->q_rx[i]); napi_enable(&dev->napi[i]); } @@ -640,9 +651,11 @@ mt76_dma_init(struct mt76_dev *dev) static const struct mt76_queue_ops mt76_dma_ops = { .init = mt76_dma_init, .alloc = mt76_dma_alloc_queue, + .reset_q = mt76_dma_queue_reset, .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw, .tx_queue_skb = mt76_dma_tx_queue_skb, .tx_cleanup = mt76_dma_tx_cleanup, + .rx_cleanup = mt76_dma_rx_cleanup, .rx_reset = mt76_dma_rx_reset, .kick = mt76_dma_kick_queue, }; diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index e7c27697ef04..fdf786f975ea 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -45,6 +45,7 @@ enum mt76_mcu_evt_type { EVT_EVENT_DFS_DETECT_RSP, }; +int mt76_dma_rx_poll(struct napi_struct *napi, int budget); void mt76_dma_attach(struct mt76_dev *dev); void mt76_dma_cleanup(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 665b54c5c8ae..3b47e85e95e7 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -9,8 +9,7 @@ #include <linux/etherdevice.h> #include "mt76.h" -static int -mt76_get_of_eeprom(struct mt76_dev *dev, int len) +int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) { #if defined(CONFIG_OF) && defined(CONFIG_MTD) struct device_node *np = dev->dev->of_node; @@ -18,7 +17,6 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len) const __be32 *list; const char *part; phandle phandle; - int offset = 0; int size; size_t retlen; int ret; @@ -54,7 +52,7 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len) } offset = be32_to_cpup(list); - ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data); + ret = mtd_read(mtd, offset, len, &retlen, eep); put_mtd_device(mtd); if (ret) goto out_put_node; @@ -65,7 +63,7 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len) } if (of_property_read_bool(dev->dev->of_node, 
"big-endian")) { - u8 *data = (u8 *)dev->eeprom.data; + u8 *data = (u8 *)eep; int i; /* convert eeprom data in Little Endian */ @@ -86,21 +84,15 @@ out_put_node: return -ENOENT; #endif } +EXPORT_SYMBOL_GPL(mt76_get_of_eeprom); void mt76_eeprom_override(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; - -#ifdef CONFIG_OF struct device_node *np = dev->dev->of_node; - const u8 *mac = NULL; - if (np) - mac = of_get_mac_address(np); - if (!IS_ERR_OR_NULL(mac)) - ether_addr_copy(phy->macaddr, mac); -#endif + of_get_mac_address(np, phy->macaddr); if (!is_valid_ether_addr(phy->macaddr)) { eth_random_addr(phy->macaddr); @@ -111,6 +103,226 @@ mt76_eeprom_override(struct mt76_phy *phy) } EXPORT_SYMBOL_GPL(mt76_eeprom_override); +static bool mt76_string_prop_find(struct property *prop, const char *str) +{ + const char *cp = NULL; + + if (!prop || !str || !str[0]) + return false; + + while ((cp = of_prop_next_string(prop, cp)) != NULL) + if (!strcasecmp(cp, str)) + return true; + + return false; +} + +static struct device_node * +mt76_find_power_limits_node(struct mt76_dev *dev) +{ + struct device_node *np = dev->dev->of_node; + const char *const region_names[] = { + [NL80211_DFS_ETSI] = "etsi", + [NL80211_DFS_FCC] = "fcc", + [NL80211_DFS_JP] = "jp", + }; + struct device_node *cur, *fallback = NULL; + const char *region_name = NULL; + + if (dev->region < ARRAY_SIZE(region_names)) + region_name = region_names[dev->region]; + + np = of_get_child_by_name(np, "power-limits"); + if (!np) + return NULL; + + for_each_child_of_node(np, cur) { + struct property *country = of_find_property(cur, "country", NULL); + struct property *regd = of_find_property(cur, "regdomain", NULL); + + if (!country && !regd) { + fallback = cur; + continue; + } + + if (mt76_string_prop_find(country, dev->alpha2) || + mt76_string_prop_find(regd, region_name)) + return cur; + } + + return fallback; +} + +static const __be32 * +mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) +{ + struct property *prop = of_find_property(np, name, NULL); + + if (!prop || !prop->value || prop->length < min * 4) + return NULL; + + *len = prop->length; + + return prop->value; +} + +static struct device_node * +mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) +{ + struct device_node *cur; + const __be32 *val; + size_t len; + + for_each_child_of_node(np, cur) { + val = mt76_get_of_array(cur, "channels", &len, 2); + if (!val) + continue; + + while (len >= 2 * sizeof(*val)) { + if (chan->hw_value >= be32_to_cpu(val[0]) && + chan->hw_value <= be32_to_cpu(val[1])) + return cur; + + val += 2; + len -= 2 * sizeof(*val); + } + } + + return NULL; +} + +static s8 +mt76_get_txs_delta(struct device_node *np, u8 nss) +{ + const __be32 *val; + size_t len; + + val = mt76_get_of_array(np, "txs-delta", &len, nss); + if (!val) + return 0; + + return be32_to_cpu(val[nss - 1]); +} + +static void +mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data, + s8 target_power, s8 nss_delta, s8 *max_power) +{ + int i; + + if (!data) + return; + + for (i = 0; i < pwr_len; i++) { + pwr[i] = min_t(s8, target_power, + be32_to_cpu(data[i]) + nss_delta); + *max_power = max(*max_power, pwr[i]); + } +} + +static void +mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num, + const __be32 *data, size_t len, s8 target_power, + s8 nss_delta, s8 *max_power) +{ + int i, cur; + + if (!data) + return; + + len /= 4; + cur = be32_to_cpu(data[0]); + for (i = 0; i < pwr_num; i++) { + if (len < pwr_len + 1) + break; + 
+ mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1, + target_power, nss_delta, max_power); + if (--cur > 0) + continue; + + data += pwr_len + 1; + len -= pwr_len + 1; + if (!len) + break; + + cur = be32_to_cpu(data[0]); + } +} + +s8 mt76_get_rate_power_limits(struct mt76_phy *phy, + struct ieee80211_channel *chan, + struct mt76_power_limits *dest, + s8 target_power) +{ + struct mt76_dev *dev = phy->dev; + struct device_node *np; + const __be32 *val; + char name[16]; + u32 mcs_rates = dev->drv->mcs_rates; + u32 ru_rates = ARRAY_SIZE(dest->ru[0]); + char band; + size_t len; + s8 max_power = 0; + s8 txs_delta; + + if (!mcs_rates) + mcs_rates = 10; + + memset(dest, target_power, sizeof(*dest)); + + if (!IS_ENABLED(CONFIG_OF)) + return target_power; + + np = mt76_find_power_limits_node(dev); + if (!np) + return target_power; + + switch (chan->band) { + case NL80211_BAND_2GHZ: + band = '2'; + break; + case NL80211_BAND_5GHZ: + band = '5'; + break; + default: + return target_power; + } + + snprintf(name, sizeof(name), "txpower-%cg", band); + np = of_get_child_by_name(np, name); + if (!np) + return target_power; + + np = mt76_find_channel_node(np, chan); + if (!np) + return target_power; + + txs_delta = mt76_get_txs_delta(np, hweight8(phy->antenna_mask)); + + val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck)); + mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val, + target_power, txs_delta, &max_power); + + val = mt76_get_of_array(np, "rates-ofdm", + &len, ARRAY_SIZE(dest->ofdm)); + mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val, + target_power, txs_delta, &max_power); + + val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1); + mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]), + ARRAY_SIZE(dest->mcs), val, len, + target_power, txs_delta, &max_power); + + val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1); + mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]), + ARRAY_SIZE(dest->ru), val, len, + target_power, txs_delta, &max_power); + + return max_power; +} +EXPORT_SYMBOL_GPL(mt76_get_rate_power_limits); + int mt76_eeprom_init(struct mt76_dev *dev, int len) { @@ -119,6 +331,6 @@ mt76_eeprom_init(struct mt76_dev *dev, int len) if (!dev->eeprom.data) return -ENOMEM; - return !mt76_get_of_eeprom(dev, len); + return !mt76_get_of_eeprom(dev, dev->eeprom.data, 0, len); } EXPORT_SYMBOL_GPL(mt76_eeprom_init); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 696d00d1976c..977acab0360a 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -428,6 +428,9 @@ mt76_alloc_device(struct device *pdev, unsigned int size, mutex_init(&dev->mcu.mutex); dev->tx_worker.fn = mt76_tx_worker; + spin_lock_init(&dev->token_lock); + idr_init(&dev->token); + INIT_LIST_HEAD(&dev->txwi_cache); for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) @@ -508,6 +511,39 @@ void mt76_free_device(struct mt76_dev *dev) } EXPORT_SYMBOL_GPL(mt76_free_device); +static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q) +{ + struct sk_buff *skb = phy->rx_amsdu[q].head; + struct mt76_dev *dev = phy->dev; + + phy->rx_amsdu[q].head = NULL; + phy->rx_amsdu[q].tail = NULL; + __skb_queue_tail(&dev->rx_skb[q], skb); +} + +static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + + if (phy->rx_amsdu[q].head 
&& + (!status->amsdu || status->first_amsdu || + status->seqno != phy->rx_amsdu[q].seqno)) + mt76_rx_release_amsdu(phy, q); + + if (!phy->rx_amsdu[q].head) { + phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list; + phy->rx_amsdu[q].seqno = status->seqno; + phy->rx_amsdu[q].head = skb; + } else { + *phy->rx_amsdu[q].tail = skb; + phy->rx_amsdu[q].tail = &skb->next; + } + + if (!status->amsdu || status->last_amsdu) + mt76_rx_release_amsdu(phy, q); +} + void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -525,7 +561,8 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) phy->test.rx_stats.fcs_error[q]++; } #endif - __skb_queue_tail(&dev->rx_skb[q], skb); + + mt76_rx_release_burst(phy, q, skb); } EXPORT_SYMBOL_GPL(mt76_rx); @@ -720,6 +757,8 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, status->signal = mstat.signal; status->chains = mstat.chains; status->ampdu_reference = mstat.ampdu_ref; + status->device_timestamp = mstat.timestamp; + status->mactime = mstat.timestamp; BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb)); BUILD_BUG_ON(sizeof(status->chain_signal) != @@ -737,6 +776,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; struct ieee80211_hdr *hdr; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; int ret; if (!(status->flag & RX_FLAG_DECRYPTED)) @@ -757,12 +797,12 @@ mt76_check_ccmp_pn(struct sk_buff *skb) } BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0])); - ret = memcmp(status->iv, wcid->rx_key_pn[status->tid], + ret = memcmp(status->iv, wcid->rx_key_pn[tidno], sizeof(status->iv)); if (ret <= 0) return -EINVAL; /* replay */ - memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv)); + memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv)); if (status->flag & RX_FLAG_IV_STRIPPED) status->flag |= RX_FLAG_PN_VALIDATED; @@ -785,6 +825,7 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status, }; struct ieee80211_sta *sta; u32 airtime; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len); spin_lock(&dev->cc_lock); @@ -795,7 +836,7 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status, return; sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); - ieee80211_sta_register_airtime(sta, status->tid, 0, airtime); + ieee80211_sta_register_airtime(sta, tidno, 0, airtime); } static void @@ -823,7 +864,6 @@ mt76_airtime_flush_ampdu(struct mt76_dev *dev) static void mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; @@ -831,6 +871,11 @@ mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) return; if (!wcid || !wcid->sta) { + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (status->flag & RX_FLAG_8023) + return; + if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr)) return; @@ -864,10 +909,12 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct ieee80211_hw *hw; struct mt76_wcid *wcid = status->wcid; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; bool ps; hw = mt76_phy_hw(dev, status->ext_phy); - if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) { + if 
(ieee80211_is_pspoll(hdr->frame_control) && !wcid && + !(status->flag & RX_FLAG_8023)) { sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL); if (sta) wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv; @@ -885,6 +932,9 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) wcid->inactive_count = 0; + if (status->flag & RX_FLAG_8023) + return; + if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags)) return; @@ -902,7 +952,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) if (ps && (ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control))) - ieee80211_sta_uapsd_trigger(sta, status->tid); + ieee80211_sta_uapsd_trigger(sta, tidno); if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps) return; @@ -926,13 +976,26 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + if (mt76_check_ccmp_pn(skb)) { dev_kfree_skb(skb); continue; } + skb_shinfo(skb)->frag_list = NULL; mt76_rx_convert(dev, skb, &hw, &sta); ieee80211_rx_list(hw, sta, skb, &list); + + /* subsequent amsdu frames */ + while (nskb) { + skb = nskb; + nskb = nskb->next; + skb->next = NULL; + + mt76_rx_convert(dev, skb, &hw, &sta); + ieee80211_rx_list(hw, sta, skb, &list); + } } spin_unlock(&dev->rx_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 8bf45497cfca..36ede65919f8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -17,12 +17,14 @@ #include "util.h" #include "testmode.h" -#define MT_MCU_RING_SIZE 32 -#define MT_RX_BUF_SIZE 2048 -#define MT_SKB_HEAD_LEN 128 +#define MT_MCU_RING_SIZE 32 +#define MT_RX_BUF_SIZE 2048 +#define MT_SKB_HEAD_LEN 128 -#define MT_MAX_NON_AQL_PKT 16 -#define MT_TXQ_FREE_THR 32 +#define MT_MAX_NON_AQL_PKT 16 +#define MT_TXQ_FREE_THR 32 + +#define MT76_TOKEN_FREE_THR 64 struct mt76_dev; struct mt76_phy; @@ -169,7 +171,8 @@ struct mt76_mcu_ops { }; struct mt76_queue_ops { - int (*init)(struct mt76_dev *dev); + int (*init)(struct mt76_dev *dev, + int (*poll)(struct napi_struct *napi, int budget)); int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q, int idx, int n_desc, int bufsize, @@ -190,13 +193,18 @@ struct mt76_queue_ops { void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q, bool flush); + void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q); + void (*kick)(struct mt76_dev *dev, struct mt76_queue *q); + + void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q); }; enum mt76_wcid_flags { MT_WCID_FLAG_CHECK_PS, MT_WCID_FLAG_PS, MT_WCID_FLAG_4ADDR, + MT_WCID_FLAG_HDR_TRANS, }; #define MT76_N_WCIDS 288 @@ -222,6 +230,7 @@ struct mt76_wcid { u16 idx; u8 hw_key_idx; + u8 hw_key_idx2; u8 sta:1; u8 ext_phy:1; @@ -324,6 +333,8 @@ struct mt76_driver_ops { u32 drv_flags; u32 survey_flags; u16 txwi_size; + u16 token_size; + u8 mcs_rates; void (*update_survey)(struct mt76_dev *dev); @@ -491,15 +502,16 @@ struct mt76_rx_status { u16 wcid_idx; }; - unsigned long reorder_time; + u32 reorder_time; u32 ampdu_ref; + u32 timestamp; u8 iv[6]; u8 ext_phy:1; u8 aggr:1; - u8 tid; + u8 qos_ctl; u16 seqno; u16 freq; @@ -507,6 +519,7 @@ struct mt76_rx_status { u8 enc_flags; u8 encoding:2, bw:3, he_ru:3; u8 he_gi:2, he_dcm:1; + u8 amsdu:1, first_amsdu:1, last_amsdu:1; u8 rate_idx; u8 nss; u8 band; @@ -529,7 +542,7 @@ struct mt76_testmode_data { struct sk_buff *tx_skb; u32 tx_count; - 
u16 tx_msdu_len; + u16 tx_mpdu_len; u8 tx_rate_mode; u8 tx_rate_idx; @@ -600,6 +613,12 @@ struct mt76_phy { struct delayed_work mac_work; u8 mac_work_count; + + struct { + struct sk_buff *head; + struct sk_buff **tail; + u16 seqno; + } rx_amsdu[__MT_RXQ_MAX]; }; struct mt76_dev { @@ -628,6 +647,7 @@ struct mt76_dev { struct mt76_mcu mcu; struct net_device napi_dev; + struct net_device tx_napi_dev; spinlock_t rx_lock; struct napi_struct napi[__MT_RXQ_MAX]; struct sk_buff_head rx_skb[__MT_RXQ_MAX]; @@ -641,6 +661,10 @@ struct mt76_dev { struct mt76_worker tx_worker; struct napi_struct tx_napi; + spinlock_t token_lock; + struct idr token; + int token_count; + wait_queue_head_t tx_wait; struct sk_buff_head status_list; @@ -695,6 +719,13 @@ struct mt76_dev { }; }; +struct mt76_power_limits { + s8 cck[4]; + s8 ofdm[8]; + s8 mcs[4][10]; + s8 ru[7][12]; +}; + enum mt76_phy_type { MT_PHY_TYPE_CCK, MT_PHY_TYPE_OFDM, @@ -778,13 +809,15 @@ static inline u16 mt76_rev(struct mt76_dev *dev) #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) -#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76)) +#define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) #define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) -#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_reset(dev, ...) 
(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__) #define mt76_for_each_q_rx(dev, i) \ for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \ @@ -811,6 +844,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str, int mt76_eeprom_init(struct mt76_dev *dev, int len); void mt76_eeprom_override(struct mt76_phy *phy); +int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, @@ -988,6 +1022,7 @@ void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta, void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb); void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid); void mt76_txq_schedule_all(struct mt76_phy *phy); +void mt76_tx_worker_run(struct mt76_dev *dev); void mt76_tx_worker(struct mt76_worker *w); void mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, @@ -1056,6 +1091,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len); int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state); +int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len); static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable) { @@ -1176,4 +1212,45 @@ mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set); +s8 mt76_get_rate_power_limits(struct mt76_phy *phy, + struct ieee80211_channel *chan, + struct mt76_power_limits *dest, + s8 target_power); + +struct mt76_txwi_cache * +mt76_token_release(struct mt76_dev *dev, int token, bool *wake); +int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi); +void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked); + +static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) +{ + spin_lock_bh(&dev->token_lock); + __mt76_set_tx_blocked(dev, blocked); + spin_unlock_bh(&dev->token_lock); +} + +static inline int +mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) +{ + int token; + + spin_lock_bh(&dev->token_lock); + token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size, + GFP_ATOMIC); + spin_unlock_bh(&dev->token_lock); + + return token; +} + +static inline struct mt76_txwi_cache * +mt76_token_put(struct mt76_dev *dev, int token) +{ + struct mt76_txwi_cache *txwi; + + spin_lock_bh(&dev->token_lock); + txwi = idr_remove(&dev->token, token); + spin_unlock_bh(&dev->token_lock); + + return txwi; +} #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 0086f18cb79a..415ea17b9be6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -219,11 +219,11 @@ int mt7603_dma_init(struct mt7603_dev *dev) return ret; mt76_wr(dev, MT_DELAY_INT_CFG, 0); - ret = mt76_init_queues(dev); + ret = mt76_init_queues(dev, mt76_dma_rx_poll); if (ret) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7603_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index f0b879c3eba8..e1b2cfa56074 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -548,6 +548,9 @@ int mt7603_register_device(struct mt7603_dev *dev) hw->max_report_rates = 7; hw->max_rate_tries = 11; + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; + hw->sta_data_size = sizeof(struct mt7603_sta); hw->vif_data_size = sizeof(struct mt7603_vif); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index cc4e7bc48294..fbceb07c5f37 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -532,20 +532,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; } - if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | - MT_RXD2_NORMAL_NON_AMPDU))) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (dev->rx_ampdu_ts != rxd[12]) { - if (!++dev->ampdu_ref) - dev->ampdu_ref++; - } - dev->rx_ampdu_ts = rxd[12]; - - status->ampdu_ref = dev->ampdu_ref; - } - remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET; if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) @@ -579,6 +565,23 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) return -EINVAL; } if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | + MT_RXD2_NORMAL_NON_AMPDU))) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (dev->rx_ampdu_ts != status->timestamp) { + if (!++dev->ampdu_ref) + dev->ampdu_ref++; + } + dev->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = dev->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -651,7 +654,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) status->aggr = unicast && !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + status->qos_ctl = *ieee80211_get_qos_ctl(hdr); status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); return 0; @@ -1442,6 +1445,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mt76_queue_rx_reset(dev, i); } + mt76_tx_status_check(&dev->mt76, NULL, true); + mt7603_dma_sched_reset(dev); mt7603_mac_dma_start(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 96b6c8916730..6abfe6b19afa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -21,9 +21,8 @@ mt7603_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct mt7603_mcu_rxd *rxd; if (!skb) { - dev_err(mdev->dev, - "MCU message %d (seq %d) timed out\n", - cmd, seq); + dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n", + abs(cmd), seq); dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT; return -ETIMEDOUT; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index b787c56fd8d6..1df5b9fed2bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -120,7 +120,7 @@ struct mt7603_dev { unsigned long last_cca_adj; u32 ampdu_ref; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u8 rssi_offset[3]; u8 slottime; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c 
b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c index 06fa28f645f2..aa6cb668b012 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c @@ -7,7 +7,7 @@ #include "mt7603.h" static const struct pci_device_id mt76pci_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7603) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7603) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 7ae48b4fa564..676bb22726d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -69,6 +69,7 @@ static int mt7615_pm_set(void *data, u64 val) { struct mt7615_dev *dev = data; + struct mt76_connac_pm *pm = &dev->pm; int ret = 0; if (!mt7615_wait_for_mcu_init(dev)) @@ -77,6 +78,9 @@ mt7615_pm_set(void *data, u64 val) if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76)) return -EOPNOTSUPP; + if (val == pm->enable) + return 0; + mt7615_mutex_acquire(dev); if (dev->phy.n_beacon_vif) { @@ -84,7 +88,11 @@ mt7615_pm_set(void *data, u64 val) goto out; } - dev->pm.enable = val; + if (!pm->enable) { + pm->stats.last_wake_event = jiffies; + pm->stats.last_doze_event = jiffies; + } + pm->enable = val; out: mt7615_mutex_release(dev); @@ -104,6 +112,26 @@ mt7615_pm_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n"); static int +mt7615_pm_stats(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + struct mt76_connac_pm *pm = &dev->pm; + unsigned long awake_time = pm->stats.awake_time; + unsigned long doze_time = pm->stats.doze_time; + + if (!test_bit(MT76_STATE_PM, &dev->mphy.state)) + awake_time += jiffies - pm->stats.last_wake_event; + else + doze_time += jiffies - pm->stats.last_doze_event; + + seq_printf(s, "awake time: %14u\ndoze time: %15u\n", + jiffies_to_msecs(awake_time), + jiffies_to_msecs(doze_time)); + + return 0; +} + +static int mt7615_pm_idle_timeout_set(void *data, u64 val) { struct mt7615_dev *dev = data; @@ -515,20 +543,27 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); debugfs_create_file("idle-timeout", 0600, dir, dev, &fops_pm_idle_timeout); + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, + mt7615_pm_stats); debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, mt7615_radio_read); - debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); - /* test pattern knobs */ - debugfs_create_u8("pattern_len", 0600, dir, - &dev->radar_pattern.n_pulses); - debugfs_create_u32("pulse_period", 0600, dir, - &dev->radar_pattern.period); - debugfs_create_u16("pulse_width", 0600, dir, - &dev->radar_pattern.width); - debugfs_create_u16("pulse_power", 0600, dir, - &dev->radar_pattern.power); - debugfs_create_file("radar_trigger", 0200, dir, dev, - &fops_radar_pattern); + + if (is_mt7615(&dev->mt76)) { + debugfs_create_u32("dfs_hw_pattern", 0400, dir, + &dev->hw_pattern); + /* test pattern knobs */ + debugfs_create_u8("pattern_len", 0600, dir, + &dev->radar_pattern.n_pulses); + debugfs_create_u32("pulse_period", 0600, dir, + &dev->radar_pattern.period); + debugfs_create_u16("pulse_width", 0600, dir, + &dev->radar_pattern.width); + debugfs_create_u16("pulse_power", 0600, dir, + &dev->radar_pattern.power); + debugfs_create_file("radar_trigger", 0200, dir, dev, + &fops_radar_pattern); + } + debugfs_create_file("reset_test", 0200, dir, dev, 
&fops_reset_test); debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 25e3069cf2b1..8004ae5c16a9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -71,15 +71,39 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget) struct mt7615_dev *dev; dev = container_of(napi, struct mt7615_dev, mt76.tx_napi); + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); - - if (napi_complete_done(napi, 0)) + if (napi_complete(napi)) mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev)); + mt76_connac_pm_unref(&dev->pm); + return 0; } +static int mt7615_poll_rx(struct napi_struct *napi, int budget) +{ + struct mt7615_dev *dev; + int done; + + dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } + done = mt76_dma_rx_poll(napi, budget); + mt76_connac_pm_unref(&dev->pm); + + return done; +} + int mt7615_wait_pdma_busy(struct mt7615_dev *dev) { struct mt76_dev *mdev = &dev->mt76; @@ -176,10 +200,30 @@ static void mt7663_dma_sched_init(struct mt7615_dev *dev) mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); } +void mt7615_dma_start(struct mt7615_dev *dev) +{ + /* start dma engine */ + mt76_set(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + if (is_mt7622(&dev->mt76)) + mt7622_dma_sched_init(dev); + + if (is_mt7663(&dev->mt76)) { + mt7663_dma_sched_init(dev); + + mt76_wr(dev, MT_MCU2HOST_INT_ENABLE, MT7663_MCU_CMD_ERROR_MASK); + } + +} + int mt7615_dma_init(struct mt7615_dev *dev) { int rx_ring_size = MT7615_RX_RING_SIZE; int rx_buf_size = MT_RX_BUF_SIZE; + u32 mask; int ret; /* Increase buffer size to receive large VHT MPDUs */ @@ -241,11 +285,11 @@ int mt7615_dma_init(struct mt7615_dev *dev) mt76_wr(dev, MT_DELAY_INT_CFG, 0); - ret = mt76_init_queues(dev); + ret = mt76_init_queues(dev, mt7615_poll_rx); if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7615_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); @@ -253,20 +297,17 @@ int mt7615_dma_init(struct mt7615_dev *dev) MT_WPDMA_GLO_CFG_TX_DMA_BUSY | MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000); - /* start dma engine */ - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_RX_DMA_EN); - /* enable interrupts for TX/RX rings */ - mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) | - MT_INT_MCU_CMD); - - if (is_mt7622(&dev->mt76)) - mt7622_dma_sched_init(dev); + mask = MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev); if (is_mt7663(&dev->mt76)) - mt7663_dma_sched_init(dev); + mask |= MT7663_INT_MCU_CMD; + else + mask |= MT_INT_MCU_CMD; + + mt7615_irq_enable(dev, mask); + + mt7615_dma_start(dev); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index 2eab23898c77..6dbaaf95ee38 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -86,6 +86,7 @@ static int 
mt7615_check_eeprom(struct mt76_dev *dev) switch (val) { case 0x7615: case 0x7622: + case 0x7663: return 0; default: return -EINVAL; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 571390fa4de7..86341d1f82f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -116,10 +116,10 @@ mt7615_mac_init(struct mt7615_dev *dev) mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN); mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN); - /* disable hdr translation and hw AMSDU */ mt76_wr(dev, MT_DMA_DCR0, FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) | - MT_DMA_DCR0_RX_VEC_DROP); + MT_DMA_DCR0_RX_VEC_DROP | MT_DMA_DCR0_DAMSDU_EN | + MT_DMA_DCR0_RX_HDR_TRANS_EN); /* disable TDLS filtering */ mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN); mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN); @@ -129,6 +129,7 @@ mt7615_mac_init(struct mt7615_dev *dev) } else { mt7615_init_mac_chain(dev, 1); } + mt7615_mcu_set_rx_hdr_trans_blacklist(dev); } static void @@ -251,6 +252,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev, int delta_idx, delta = mt76_tx_power_nss_delta(n_chains); u8 *eep = (u8 *)dev->mt76.eeprom.data; enum nl80211_band band = sband->band; + struct mt76_power_limits limits; u8 rate_val; delta_idx = mt7615_eeprom_get_power_delta_index(dev, band); @@ -279,7 +281,11 @@ void mt7615_init_txpower(struct mt7615_dev *dev, target_power = max(target_power, eep[index]); } - target_power = DIV_ROUND_UP(target_power + delta, 2); + target_power = mt76_get_rate_power_limits(&dev->mphy, chan, + &limits, + target_power); + target_power += delta; + target_power = DIV_ROUND_UP(target_power, 2); chan->max_power = min_t(int, chan->max_reg_power, target_power); chan->orig_mpwr = target_power; @@ -310,12 +316,18 @@ mt7615_regd_notifier(struct wiphy *wiphy, memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); dev->mt76.region = request->dfs_region; + mt7615_init_txpower(dev, &mphy->sband_2g.sband); + mt7615_init_txpower(dev, &mphy->sband_5g.sband); + mt7615_mutex_acquire(dev); if (chandef->chan->flags & IEEE80211_CHAN_RADAR) mt7615_dfs_init_radar_detector(phy); - if (mt7615_firmware_offload(phy->dev)) + + if (mt7615_firmware_offload(phy->dev)) { mt76_connac_mcu_set_channel_domain(mphy); + mt76_connac_mcu_set_rate_txpower(mphy); + } mt7615_mutex_release(dev); } @@ -330,6 +342,10 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) hw->max_rates = 3; hw->max_report_rates = 7; hw->max_rate_tries = 11; + hw->netdev_features = NETIF_F_RXCSUM; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; phy->slottime = 9; @@ -360,11 +376,17 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); if (is_mt7615(&phy->dev->mt76)) hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM; else hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; + + phy->mt76->sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + phy->mt76->sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; } static void @@ -480,10 +502,13 @@ void mt7615_init_device(struct mt7615_dev *dev) dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; + dev->mt76.tx_worker.fn = 
mt7615_tx_worker; INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work); INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); - init_completion(&dev->pm.wake_cmpl); + spin_lock_init(&dev->pm.wake.lock); + mutex_init(&dev->pm.mutex); + init_waitqueue_head(&dev->pm.wait); spin_lock_init(&dev->pm.txq_lock); set_bit(MT76_STATE_PM, &dev->mphy.state); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work); @@ -496,16 +521,13 @@ void mt7615_init_device(struct mt7615_dev *dev) init_waitqueue_head(&dev->reset_wait); init_waitqueue_head(&dev->phy.roc_wait); - INIT_WORK(&dev->reset_work, mt7615_mac_reset_work); INIT_WORK(&dev->phy.roc_work, mt7615_roc_work); timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0); mt7615_init_wiphy(hw); dev->pm.idle_timeout = MT7615_PM_TIMEOUT; - dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + dev->pm.stats.last_wake_event = jiffies; + dev->pm.stats.last_doze_event = jiffies; mt7615_cap_dbdc_disable(dev); dev->phy.dfs_state = -1; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 59fdd0fc2ad4..f81a17d56008 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -234,11 +234,13 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); - __le32 rxd12 = rxd[12]; - bool unicast, remove_pad, insert_ccmp_hdr = false; + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; int phy_idx; int i, idx; - u8 chfreq; + u8 chfreq, amsdu_info, qos_ctl = 0; + u16 seq_ctrl = 0; + __le16 fc = 0; memset(status, 0, sizeof(*status)); @@ -254,8 +256,12 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) else phy_idx = -1; + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M; idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); + hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS; status->wcid = mt7615_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -268,6 +274,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) spin_unlock_bh(&dev->sta_poll_lock); } + if ((rxd0 & csum_mask) == csum_mask) + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -288,6 +297,13 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) rxd += 4; if (rxd0 & MT_RXD0_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0)); + qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2); + seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2); + rxd += 4; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -312,6 +328,23 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) } if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | + MT_RXD2_NORMAL_NON_AMPDU))) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + 
if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -355,20 +388,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; - if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | - MT_RXD2_NORMAL_NON_AMPDU))) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (phy->rx_ampdu_ts != rxd12) { - if (!++phy->ampdu_ref) - phy->ampdu_ref++; - } - phy->rx_ampdu_ts = rxd12; - - status->ampdu_ref = phy->ampdu_ref; - } - if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { u32 rxdg0 = le32_to_cpu(rxd[0]); u32 rxdg1 = le32_to_cpu(rxd[1]); @@ -446,20 +465,42 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - if (insert_ccmp_hdr) { + amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME; + if (!hdr_trans) { + memmove(skb->data + 2, skb->data, + ieee80211_get_hdrlen_from_skb(skb)); + skb_pull(skb, 2); + } + } + + if (insert_ccmp_hdr && !hdr_trans) { u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); mt76_insert_ccmp_hdr(skb, key_id); } - hdr = (struct ieee80211_hdr *)skb->data; - if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + if (!hdr_trans) { + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag |= RX_FLAG_8023; + } + + if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; status->aggr = unicast && - !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; - status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + !ieee80211_is_qos_nullfunc(fc); + status->qos_ctl = qos_ctl; + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); return 0; } @@ -690,7 +731,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) { int i; - for (i = 1; i < txp->nbuf; i++) + for (i = 0; i < txp->nbuf; i++) dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); } @@ -966,6 +1007,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct mt7615_dev *dev = phy->dev; struct mt7615_rate_desc rd; u32 w5, w27, addr; + u16 idx = sta->vif->mt76.omac_idx; if (!mt76_is_mmio(&dev->mt76)) { mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); @@ -1017,7 +1059,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, mt76_wr(dev, addr + 27 * 4, w27); - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + addr = idx > 1 ? 
MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */ sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); sta->rate_set_tsf |= rd.rateset; @@ -1033,7 +1078,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); static int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, - enum mt7615_cipher_type cipher, + enum mt7615_cipher_type cipher, u16 cipher_mask, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; @@ -1050,22 +1095,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, memcpy(data + 16, key->key + 24, 8); memcpy(data + 24, key->key + 16, 8); } else { - if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) - memmove(data + 16, data, 16); - if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) + if (cipher_mask == BIT(cipher)) memcpy(data, key->key, key->keylen); - else if (cipher == MT_CIPHER_BIP_CMAC_128) + else if (cipher != MT_CIPHER_BIP_CMAC_128) + memcpy(data, key->key, 16); + if (cipher == MT_CIPHER_BIP_CMAC_128) memcpy(data + 16, key->key, 16); } } else { - if (wcid->cipher & ~BIT(cipher)) { - if (cipher != MT_CIPHER_BIP_CMAC_128) - memmove(data, data + 16, 16); + if (cipher == MT_CIPHER_BIP_CMAC_128) memset(data + 16, 0, 16); - } else { + else if (cipher_mask) + memset(data, 0, 16); + if (!cipher_mask) memset(data, 0, sizeof(data)); - } } + mt76_wr_copy(dev, addr, data, sizeof(data)); return 0; @@ -1073,7 +1118,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, static int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt7615_cipher_type cipher, + enum mt7615_cipher_type cipher, u16 cipher_mask, int keyidx, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; @@ -1083,20 +1128,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, w0 = mt76_rr(dev, addr); w1 = mt76_rr(dev, addr + 4); - if (cmd == SET_KEY) { - w0 |= MT_WTBL_W0_RX_KEY_VALID | - FIELD_PREP(MT_WTBL_W0_RX_IK_VALID, - cipher == MT_CIPHER_BIP_CMAC_128); - if (cipher != MT_CIPHER_BIP_CMAC_128 || - !wcid->cipher) - w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); - } else { - if (!(wcid->cipher & ~BIT(cipher))) - w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | - MT_WTBL_W0_KEY_IDX); - if (cipher == MT_CIPHER_BIP_CMAC_128) - w0 &= ~MT_WTBL_W0_RX_IK_VALID; + + if (cipher_mask) + w0 |= MT_WTBL_W0_RX_KEY_VALID; + else + w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX); + if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128)) + w0 |= MT_WTBL_W0_RX_IK_VALID; + else + w0 &= ~MT_WTBL_W0_RX_IK_VALID; + + if (cmd == SET_KEY && + (cipher != MT_CIPHER_BIP_CMAC_128 || + cipher_mask == BIT(cipher))) { + w0 &= ~MT_WTBL_W0_KEY_IDX; + w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); } + mt76_wr(dev, MT_WTBL_RICR0, w0); mt76_wr(dev, MT_WTBL_RICR1, w1); @@ -1109,24 +1157,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, static void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt7615_cipher_type cipher, + enum mt7615_cipher_type cipher, u16 cipher_mask, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); - if (cmd == SET_KEY) { - if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) - mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, - FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); - } else { - if (cipher != MT_CIPHER_BIP_CMAC_128 && - wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128)) - mt76_rmw(dev, addr + 2 * 4, 
MT_WTBL_W2_KEY_TYPE, - FIELD_PREP(MT_WTBL_W2_KEY_TYPE, - MT_CIPHER_BIP_CMAC_128)); - else if (!(wcid->cipher & ~BIT(cipher))) - mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); + if (!cipher_mask) { + mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); + return; } + + if (cmd != SET_KEY) + return; + + if (cipher == MT_CIPHER_BIP_CMAC_128 && + cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128)) + return; + + mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, + FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); } int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, @@ -1135,25 +1184,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, enum set_key_cmd cmd) { enum mt7615_cipher_type cipher; + u16 cipher_mask = wcid->cipher; int err; cipher = mt7615_mac_get_cipher(key->cipher); if (cipher == MT_CIPHER_NONE) return -EOPNOTSUPP; - mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); - err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd); + if (cmd == SET_KEY) + cipher_mask |= BIT(cipher); + else + cipher_mask &= ~BIT(cipher); + + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd); + err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask, + cmd); if (err < 0) return err; - err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd); + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask, + key->keyidx, cmd); if (err < 0) return err; - if (cmd == SET_KEY) - wcid->cipher |= BIT(cipher); - else - wcid->cipher &= ~BIT(cipher); + wcid->cipher = cipher_mask; return 0; } @@ -1411,11 +1465,7 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) u8 wcid; trace_mac_tx_free(dev, token); - - spin_lock_bh(&dev->token_lock); - txwi = idr_remove(&dev->token, token); - spin_unlock_bh(&dev->token_lock); - + txwi = mt76_token_put(mdev, token); if (!txwi) return; @@ -1460,14 +1510,10 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) dev_kfree_skb(skb); - if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state)) - return; - rcu_read_lock(); mt7615_mac_sta_poll(dev); rcu_read_unlock(); - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); mt76_worker_schedule(&dev->mt76.tx_worker); } @@ -1821,10 +1867,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy) int i, aggr; u32 val, val2; - memset(mib, 0, sizeof(*mib)); - - mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), - MT_MIB_SDR3_FCS_ERR_MASK); + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), + MT_MIB_SDR3_FCS_ERR_MASK); val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy), MT_MIB_AMPDU_MPDU_COUNT); @@ -1837,24 +1881,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy) aggr = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0; i < 4; i++) { val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); - - val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); - if (val2 > mib->ack_fail_cnt) - mib->ack_fail_cnt = val2; - - val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); - if (val2 > mib->ba_miss_cnt) - mib->ba_miss_cnt = val2; + mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, + val); val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); - val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); - if (val2 > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); - mib->rts_retries_cnt = val2; - } + mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, + val); val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); - dev->mt76.aggr_stats[aggr++] += val & 0xffff; dev->mt76.aggr_stats[aggr++] += val >> 16; } @@ -1869,13 +1905,19 @@ void mt7615_pm_wake_work(struct work_struct *work) pm.wake_work); mphy = dev->phy.mt76; - if (!mt7615_mcu_set_drv_ctrl(dev)) + if (!mt7615_mcu_set_drv_ctrl(dev)) { + int i; + + mt76_for_each_q_rx(&dev->mt76, i) + napi_schedule(&dev->mt76.napi[i]); mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); - else - dev_err(mphy->dev->dev, "failed to wake device\n"); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + MT7615_WATCHDOG_TIME); + } ieee80211_wake_queues(mphy->hw); - complete_all(&dev->pm.wake_cmpl); + wake_up(&dev->pm.wait); } void mt7615_pm_power_save_work(struct work_struct *work) @@ -1887,6 +1929,10 @@ void mt7615_pm_power_save_work(struct work_struct *work) pm.ps_work.work); delta = dev->pm.idle_timeout; + if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) || + test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) + goto out; + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { delta = dev->pm.last_activity + delta - jiffies; goto out; @@ -1924,179 +1970,27 @@ void mt7615_mac_work(struct work_struct *work) MT7615_WATCHDOG_TIME); } -static bool -mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state) -{ - bool ret; - - ret = wait_event_timeout(dev->reset_wait, - (READ_ONCE(dev->reset_state) & state), - MT7615_RESET_TIMEOUT); - WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); - return ret; -} - -static void -mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) -{ - struct ieee80211_hw *hw = priv; - struct mt7615_dev *dev = mt7615_hw_dev(hw); - - switch (vif->type) { - case NL80211_IFTYPE_MESH_POINT: - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_AP: - mt7615_mcu_add_beacon(dev, hw, vif, - vif->bss_conf.enable_beacon); - break; - default: - break; - } -} - -static void -mt7615_update_beacons(struct mt7615_dev *dev) -{ - ieee80211_iterate_active_interfaces(dev->mt76.hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7615_update_vif_beacon, dev->mt76.hw); - - if (!dev->mt76.phy2) - return; - - ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7615_update_vif_beacon, dev->mt76.phy2->hw); -} - -void mt7615_dma_reset(struct mt7615_dev *dev) -{ - int i; - - mt76_clear(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); - usleep_range(1000, 2000); - - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); - for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, 
dev->mphy.q_tx[i], true); - - mt76_for_each_q_rx(&dev->mt76, i) { - mt76_queue_rx_reset(dev, i); - } - - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); -} -EXPORT_SYMBOL_GPL(mt7615_dma_reset); - void mt7615_tx_token_put(struct mt7615_dev *dev) { struct mt76_txwi_cache *txwi; int id; - spin_lock_bh(&dev->token_lock); - idr_for_each_entry(&dev->token, txwi, id) { + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) { mt7615_txp_skb_unmap(&dev->mt76, txwi); - if (txwi->skb) - dev_kfree_skb_any(txwi->skb); + if (txwi->skb) { + struct ieee80211_hw *hw; + + hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb); + ieee80211_free_txskb(hw, txwi->skb); + } mt76_put_txwi(&dev->mt76, txwi); } - spin_unlock_bh(&dev->token_lock); - idr_destroy(&dev->token); + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); } EXPORT_SYMBOL_GPL(mt7615_tx_token_put); -void mt7615_mac_reset_work(struct work_struct *work) -{ - struct mt7615_phy *phy2; - struct mt76_phy *ext_phy; - struct mt7615_dev *dev; - - dev = container_of(work, struct mt7615_dev, reset_work); - ext_phy = dev->mt76.phy2; - phy2 = ext_phy ? ext_phy->priv : NULL; - - if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) - return; - - ieee80211_stop_queues(mt76_hw(dev)); - if (ext_phy) - ieee80211_stop_queues(ext_phy->hw); - - set_bit(MT76_RESET, &dev->mphy.state); - set_bit(MT76_MCU_RESET, &dev->mphy.state); - wake_up(&dev->mt76.mcu.wait); - cancel_delayed_work_sync(&dev->mphy.mac_work); - del_timer_sync(&dev->phy.roc_timer); - cancel_work_sync(&dev->phy.roc_work); - if (phy2) { - cancel_delayed_work_sync(&phy2->mt76->mac_work); - del_timer_sync(&phy2->roc_timer); - cancel_work_sync(&phy2->roc_work); - } - - /* lock/unlock all queues to ensure that no tx is pending */ - mt76_txq_schedule_all(&dev->mphy); - if (ext_phy) - mt76_txq_schedule_all(ext_phy); - - mt76_worker_disable(&dev->mt76.tx_worker); - napi_disable(&dev->mt76.napi[0]); - napi_disable(&dev->mt76.napi[1]); - napi_disable(&dev->mt76.tx_napi); - - mt7615_mutex_acquire(dev); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED); - - mt7615_tx_token_put(dev); - idr_init(&dev->token); - - if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7615_dma_reset(dev); - - mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT); - mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); - } - - clear_bit(MT76_MCU_RESET, &dev->mphy.state); - clear_bit(MT76_RESET, &dev->mphy.state); - - mt76_worker_enable(&dev->mt76.tx_worker); - napi_enable(&dev->mt76.tx_napi); - napi_schedule(&dev->mt76.tx_napi); - - napi_enable(&dev->mt76.napi[0]); - napi_schedule(&dev->mt76.napi[0]); - - napi_enable(&dev->mt76.napi[1]); - napi_schedule(&dev->mt76.napi[1]); - - ieee80211_wake_queues(mt76_hw(dev)); - if (ext_phy) - ieee80211_wake_queues(ext_phy->hw); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); - mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); - - mt7615_update_beacons(dev); - - mt7615_mutex_release(dev); - - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, - MT7615_WATCHDOG_TIME); - if (phy2) - ieee80211_queue_delayed_work(ext_phy->hw, - &phy2->mt76->mac_work, - MT7615_WATCHDOG_TIME); - -} - static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy) { struct mt7615_dev *dev = phy->dev; @@ -2304,8 +2198,10 @@ void mt7615_coredump_work(struct work_struct *work) 
break; skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); - if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) - break; + if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } memcpy(data, skb->data, skb->len); data += skb->len; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index 169f4e17b5b4..6bf9da040196 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -33,6 +33,9 @@ enum rx_pkt_type { #define MT_RXD1_NORMAL_BSSID GENMASK(31, 26) #define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24) +#define MT_RXD1_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD1_MID_AMSDU_FRAME BIT(1) +#define MT_RXD1_LAST_AMSDU_FRAME BIT(0) #define MT_RXD1_NORMAL_HDR_TRANS BIT(23) #define MT_RXD1_NORMAL_HDR_OFFSET BIT(22) #define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16) @@ -78,6 +81,11 @@ enum rx_pkt_type { #define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8) #define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) +#define MT_RXD4_FRAME_CONTROL GENMASK(15, 0) + +#define MT_RXD6_SEQ_CTRL GENMASK(15, 0) +#define MT_RXD6_QOS_CTL GENMASK(31, 16) + #define MT_RXV1_ACID_DET_H BIT(31) #define MT_RXV1_ACID_DET_L BIT(30) #define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 25faf486d279..39733b351ac4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -29,6 +29,7 @@ static int mt7615_start(struct ieee80211_hw *hw) struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); bool running; + int ret; if (!mt7615_wait_for_mcu_init(dev)) return -EIO; @@ -38,21 +39,42 @@ static int mt7615_start(struct ieee80211_hw *hw) running = mt7615_dev_running(dev); if (!running) { - mt7615_mcu_set_pm(dev, 0, 0); - mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); + ret = mt7615_mcu_set_pm(dev, 0, 0); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); + if (ret) + goto out; + mt7615_mac_enable_nf(dev, 0); } if (phy != &dev->phy) { - mt7615_mcu_set_pm(dev, 1, 0); - mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false); + ret = mt7615_mcu_set_pm(dev, 1, 0); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false); + if (ret) + goto out; + mt7615_mac_enable_nf(dev, 1); } - if (mt7615_firmware_offload(dev)) - mt76_connac_mcu_set_channel_domain(phy->mt76); + if (mt7615_firmware_offload(dev)) { + ret = mt76_connac_mcu_set_channel_domain(phy->mt76); + if (ret) + goto out; - mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + ret = mt76_connac_mcu_set_rate_txpower(phy->mt76); + if (ret) + goto out; + } + + ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + if (ret) + goto out; set_bit(MT76_STATE_RUNNING, &phy->mt76->state); @@ -62,9 +84,10 @@ static int mt7615_start(struct ieee80211_hw *hw) if (!running) mt7615_mac_reset_counters(dev); +out: mt7615_mutex_release(dev); - return 0; + return ret; } static void mt7615_stop(struct ieee80211_hw *hw) @@ -197,7 +220,9 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); - mt7615_mcu_set_dbdc(dev); + ret = mt7615_mcu_set_dbdc(dev); + if (ret) + goto out; idx = MT7615_WTBL_RESERVED - mvif->mt76.idx; @@ -217,8 +242,6 @@ static int 
mt7615_add_interface(struct ieee80211_hw *hw, ret = mt7615_mcu_add_dev_info(phy, vif, true); if (ret) goto out; - - mt7615_mac_set_beacon_filter(phy, vif, true); out: mt7615_mutex_release(dev); @@ -234,17 +257,17 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, struct mt7615_phy *phy = mt7615_hw_phy(hw); int idx = msta->wcid.idx; - /* TODO: disable beacon for the bss */ - mt7615_mutex_acquire(dev); + mt7615_mcu_add_bss_info(phy, vif, NULL, false); + mt7615_mcu_sta_add(phy, vif, NULL, false); + mt76_testmode_reset(phy->mt76, true); if (vif == phy->monitor_vif) phy->monitor_vif = NULL; mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - mt7615_mac_set_beacon_filter(phy, vif, false); mt7615_mcu_add_dev_info(phy, vif, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); @@ -296,8 +319,13 @@ int mt7615_set_channel(struct mt7615_phy *phy) mt76_set_channel(phy->mt76); if (is_mt7615(&dev->mt76) && dev->flash_eeprom) { - mt7615_mcu_apply_rx_dcoc(phy); - mt7615_mcu_apply_tx_dpd(phy); + ret = mt7615_mcu_apply_rx_dcoc(phy); + if (ret) + goto out; + + ret = mt7615_mcu_apply_tx_dpd(phy); + if (ret) + goto out; } ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH); @@ -306,8 +334,13 @@ int mt7615_set_channel(struct mt7615_phy *phy) mt7615_mac_set_timing(phy); ret = mt7615_dfs_init_radar_detector(phy); + if (ret) + goto out; + mt7615_mac_cca_stats_reset(phy); - mt7615_mcu_set_sku_en(phy, true); + ret = mt7615_mcu_set_sku_en(phy, true); + if (ret) + goto out; mt7615_mac_reset_counters(dev); phy->noise = 0; @@ -318,8 +351,7 @@ out: mt7615_mutex_release(dev); - mt76_txq_schedule_all(phy->mt76); - + mt76_worker_schedule(&dev->mt76.tx_worker); if (!mt76_testmode_enabled(phy->mt76)) ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, @@ -337,7 +369,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; - int idx = key->keyidx, err; + int idx = key->keyidx, err = 0; + u8 *wcid_keyidx = &wcid->hw_key_idx; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. @@ -352,6 +385,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: + wcid_keyidx = &wcid->hw_key_idx2; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; break; case WLAN_CIPHER_SUITE_TKIP: @@ -369,12 +403,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, mt7615_mutex_acquire(dev); - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - } + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? 
key : NULL); @@ -383,6 +418,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, else err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); +out: mt7615_mutex_release(dev); return err; @@ -526,11 +562,11 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_BEACON_ENABLED) { - mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon); - mt7615_mcu_sta_add(phy, vif, NULL, info->enable_beacon); + if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { + mt7615_mcu_add_bss_info(phy, vif, NULL, true); + mt7615_mcu_sta_add(phy, vif, NULL, true); - if (vif->p2p && info->enable_beacon) + if (vif->p2p) mt7615_mcu_set_p2p_oppps(hw, vif); } @@ -541,8 +577,16 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_PS) mt76_connac_mcu_set_vif_ps(&dev->mt76, vif); - if (changed & BSS_CHANGED_ARP_FILTER) - mt7615_mcu_update_arp_filter(hw, vif, info); + if ((changed & BSS_CHANGED_ARP_FILTER) && + mt7615_firmware_offload(dev)) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, + info); + } + + if (changed & BSS_CHANGED_ASSOC) + mt7615_mac_set_beacon_filter(phy, vif, info->assoc); mt7615_mutex_release(dev); } @@ -583,15 +627,21 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (err) return err; - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt7615_mcu_add_bss_info(phy, vif, sta, true); + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + err = mt7615_mcu_add_bss_info(phy, vif, sta, true); + if (err) + return err; + } + mt7615_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - mt7615_mcu_sta_add(&dev->phy, vif, sta, true); + err = mt7615_mcu_sta_add(&dev->phy, vif, sta, true); + if (err) + return err; mt76_connac_power_save_sched(phy->mt76, &dev->pm); - return 0; + return err; } EXPORT_SYMBOL_GPL(mt7615_mac_sta_add); @@ -643,28 +693,25 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, break; } msta->n_rates = i; - if (!test_bit(MT76_STATE_PM, &phy->mt76->state)) + if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) { mt7615_mac_set_rates(phy, msta, NULL, msta->rates); + mt76_connac_pm_unref(&dev->pm); + } spin_unlock_bh(&dev->mt76.lock); } -static void -mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +void mt7615_tx_worker(struct mt76_worker *w) { - struct mt7615_dev *dev = mt7615_hw_dev(hw); - struct mt7615_phy *phy = mt7615_hw_phy(hw); - struct mt76_phy *mphy = phy->mt76; - - if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) - return; + struct mt7615_dev *dev = container_of(w, struct mt7615_dev, + mt76.tx_worker); - if (test_bit(MT76_STATE_PM, &mphy->state)) { + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { queue_work(dev->mt76.wq, &dev->pm.wake_work); return; } - dev->pm.last_activity = jiffies; - mt76_worker_schedule(&dev->mt76.tx_worker); + mt76_tx_worker_run(&dev->mt76); + mt76_connac_pm_unref(&dev->pm); } static void mt7615_tx(struct ieee80211_hw *hw, @@ -692,9 +739,9 @@ static void mt7615_tx(struct ieee80211_hw *hw, wcid = &msta->wcid; } - if (!test_bit(MT76_STATE_PM, &mphy->state)) { - dev->pm.last_activity = jiffies; + if (mt76_connac_pm_ref(mphy, &dev->pm)) { mt76_tx(mphy, control->sta, wcid, skb); + mt76_connac_pm_unref(&dev->pm); return; } @@ -711,13 +758,13 @@ static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = 
mt7615_hw_phy(hw); - int band = phy != &dev->phy; + int err, band = phy != &dev->phy; mt7615_mutex_acquire(dev); - mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band); + err = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band); mt7615_mutex_release(dev); - return 0; + return err; } static int @@ -745,16 +792,16 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, params->buf_size); - mt7615_mcu_add_rx_ba(dev, params, true); + ret = mt7615_mcu_add_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - mt7615_mcu_add_rx_ba(dev, params, false); + ret = mt7615_mcu_add_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7615_mcu_add_tx_ba(dev, params, true); + ret = mt7615_mcu_add_tx_ba(dev, params, true); ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); ieee80211_send_bar(vif, sta->addr, tid, IEEE80211_SN_TO_SEQ(ssn)); @@ -762,7 +809,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - mt7615_mcu_add_tx_ba(dev, params, false); + ret = mt7615_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); @@ -771,7 +818,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - mt7615_mcu_add_tx_ba(dev, params, false); + ret = mt7615_mcu_add_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } @@ -803,26 +850,38 @@ mt7615_get_stats(struct ieee80211_hw *hw, struct mt7615_phy *phy = mt7615_hw_phy(hw); struct mib_stats *mib = &phy->mib; + mt7615_mutex_acquire(phy->dev); + stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; + memset(mib, 0, sizeof(*mib)); + + mt7615_mutex_release(phy->dev); + return 0; } static u64 mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); union { u64 t64; u32 t32[2]; } tsf; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); mt7615_mutex_acquire(dev); - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0); tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1); @@ -835,18 +894,24 @@ static void mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 timestamp) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); union { u64 t64; u32 t32[2]; } tsf = { .t64 = timestamp, }; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? 
MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); mt7615_mutex_acquire(dev); mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); /* TSF software overwrite */ - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE); + mt76_set(dev, reg, MT_LPON_TCR_WRITE); mt7615_mutex_release(dev); } @@ -1069,6 +1134,7 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err; if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) return 0; @@ -1077,10 +1143,26 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, cancel_work_sync(&phy->roc_work); mt7615_mutex_acquire(phy->dev); - mt7615_mcu_set_roc(phy, vif, NULL, 0); + err = mt7615_mcu_set_roc(phy, vif, NULL, 0); mt7615_mutex_release(phy->dev); - return 0; + return err; +} + +static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7615_mcu_sta_update_hdr_trans(dev, vif, sta); } #ifdef CONFIG_PM @@ -1183,9 +1265,10 @@ const struct ieee80211_ops mt7615_ops = { .sta_remove = mt7615_sta_remove, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, .set_key = mt7615_set_key, + .sta_set_decap_offload = mt7615_sta_set_decap_offload, .ampdu_action = mt7615_ampdu_action, .set_rts_threshold = mt7615_set_rts_threshold, - .wake_tx_queue = mt7615_wake_tx_queue, + .wake_tx_queue = mt76_wake_tx_queue, .sta_rate_tbl_update = mt7615_sta_rate_tbl_update, .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76_sw_scan_complete, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 631596fc2f36..aa42af9ebfd6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -175,8 +175,8 @@ int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd, int ret = 0; if (!skb) { - dev_err(mdev->dev, "Message %ld (seq %d) timeout\n", - cmd & MCU_CMD_MASK, seq); + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", + cmd, seq); return -ETIMEDOUT; } @@ -274,7 +274,7 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val) sizeof(req), false); } -static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) +void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) { if (!is_mt7622(&dev->mt76)) return; @@ -283,20 +283,30 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) MT_INFRACFG_MISC_AP2CONN_WAKE, !en * MT_INFRACFG_MISC_AP2CONN_WAKE); } +EXPORT_SYMBOL_GPL(mt7622_trigger_hif_int); static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; struct mt76_dev *mdev = &dev->mt76; u32 addr; int err; - addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; + if (is_mt7663(mdev)) { + /* Clear firmware own via N9 eint */ + mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN); + mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); + + addr = MT_CONN_HIF_ON_LPCTL; + } else { + addr = MT_CFG_LPCR_HOST; + } + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); mt7622_trigger_hif_int(dev, true); - addr = is_mt7663(mdev) ? 
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); mt7622_trigger_hif_int(dev, false); @@ -308,15 +318,22 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev) clear_bit(MT76_STATE_PM, &mphy->state); + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; + return 0; } static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; - int i; + struct mt76_connac_pm *pm = &dev->pm; + int i, err = 0; + + mutex_lock(&pm->mutex); - if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) + if (!test_bit(MT76_STATE_PM, &mphy->state)) goto out; for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { @@ -328,24 +345,31 @@ static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev) if (i == MT7615_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "driver own failed\n"); - set_bit(MT76_STATE_PM, &mphy->state); - return -EIO; + err = -EIO; + goto out; } + clear_bit(MT76_STATE_PM, &mphy->state); + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; out: - dev->pm.last_activity = jiffies; + mutex_unlock(&pm->mutex); - return 0; + return err; } static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; int err = 0; u32 addr; - if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) - return 0; + mutex_lock(&pm->mutex); + + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) + goto out; mt7622_trigger_hif_int(dev, true); @@ -362,6 +386,12 @@ static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev) mt7622_trigger_hif_int(dev, false); + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; +out: + mutex_unlock(&pm->mutex); + return err; } @@ -373,6 +403,23 @@ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) } static void +mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_phy *ext_phy = mt7615_ext_phy(dev); + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7615_mcu_csa_notify *c; + + c = (struct mt7615_mcu_csa_notify *)skb->data; + + if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx)) + mphy = dev->mt76.phy2; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_csa_finish, mphy->hw); +} + +static void mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; @@ -380,7 +427,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb) r = (struct mt7615_mcu_rdd_report *)skb->data; - if (r->idx && dev->mt76.phy2) + if (r->band_idx && dev->mt76.phy2) mphy = dev->mt76.phy2; ieee80211_radar_detected(mphy->hw); @@ -406,7 +453,8 @@ mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb) break; } - wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data); + wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, + (int)(skb->len - sizeof(*rxd)), data); } static void @@ -419,9 +467,7 @@ mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb) mt7615_mcu_rx_radar_detected(dev, skb); break; case MCU_EXT_EVENT_CSA_NOTIFY: - ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7615_mcu_csa_finish, dev); + mt7615_mcu_rx_csa_notify(dev, skb); break; case MCU_EXT_EVENT_FW_LOG_2_HOST: mt7615_mcu_rx_log_message(dev, skb); @@ 
-685,6 +731,9 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev, }; struct sk_buff *skb; + if (!enable) + goto out; + skb = ieee80211_beacon_get_template(hw, vif, &offs); if (!skb) return -EINVAL; @@ -714,6 +763,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev, } dev_kfree_skb(skb); +out: return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, &req, sizeof(req), true); } @@ -973,7 +1023,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable); if (enable && sta) - mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif); + mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0); wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, WTBL_RESET_AND_SET, NULL, @@ -987,6 +1037,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, if (sta) mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta, NULL, wtbl_hdr); + mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL, + wtbl_hdr); } cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE; @@ -1040,6 +1092,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev, wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx, sta_wtbl, wtbl_hdr); @@ -1068,10 +1123,15 @@ __mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable, int cmd) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - struct mt76_wcid *wcid; + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .enable = enable, + .cmd = cmd, + }; - wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid; - return mt76_connac_mcu_add_sta_cmd(phy, vif, sta, wcid, enable, cmd); + info.wcid = sta ? 
(struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid; + return mt76_connac_mcu_add_sta_cmd(phy, &info); } static int @@ -1094,6 +1154,25 @@ static const struct mt7615_mcu_ops sta_update_ops = { .set_fw_ctrl = mt7615_mcu_fw_pmctrl, }; +int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct sk_buff *skb = NULL; + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_SET, NULL, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE, + true); +} + static int mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) { @@ -1120,8 +1199,8 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, __le16 tim_ie_pos; __le16 csa_ie_pos; __le16 bcc_ie_pos; - /* 0: enable beacon offload - * 1: disable beacon offload + /* 0: disable beacon offload + * 1: enable beacon offload * 2: update probe respond offload */ u8 enable; @@ -1144,6 +1223,9 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, }; struct sk_buff *skb; + if (!enable) + goto out; + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs); if (!skb) return -EINVAL; @@ -1168,6 +1250,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, } dev_kfree_skb(skb); +out: return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, &req, sizeof(req), true); } @@ -1279,25 +1362,26 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) const struct firmware *fw = NULL; int len, ret, sem; + ret = firmware_request_nowarn(&fw, name, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto release_fw; + } + sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); switch (sem) { case PATCH_IS_DL: - return 0; + goto release_fw; case PATCH_NOT_DL_SEM_SUCCESS: break; default: dev_err(dev->mt76.dev, "Failed to get patch semaphore\n"); - return -EAGAIN; - } - - ret = firmware_request_nowarn(&fw, name, dev->mt76.dev); - if (ret) - goto out; - - if (!fw || !fw->data || fw->size < sizeof(*hdr)) { - dev_err(dev->mt76.dev, "Invalid firmware\n"); - ret = -EINVAL; - goto out; + ret = -EAGAIN; + goto release_fw; } hdr = (const struct mt7615_patch_hdr *)(fw->data); @@ -1326,8 +1410,6 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) dev_err(dev->mt76.dev, "Failed to start patch\n"); out: - release_firmware(fw); - sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); switch (sem) { case PATCH_REL_SEM_SUCCESS: @@ -1338,6 +1420,9 @@ out: break; } +release_fw: + release_firmware(fw); + return ret; } @@ -1427,8 +1512,7 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name) sizeof(dev->mt76.hw->wiphy->fw_version), "%.10s-%.15s", hdr->fw_ver, hdr->build_date); - if (!is_mt7615(&dev->mt76) && - !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) { + if (!is_mt7615(&dev->mt76)) { dev->fw_ver = MT7615_FIRMWARE_V2; dev->mcu_ops = &sta_update_ops; } else { @@ -2084,16 +2168,80 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku) { struct mt76_phy *mphy = phy->mt76; struct ieee80211_hw *hw = mphy->hw; + struct mt76_power_limits limits; + s8 *limits_array = (s8 *)&limits; int n_chains = 
hweight8(mphy->antenna_mask); int tx_power; int i; + static const u8 sku_mapping[] = { +#define SKU_FIELD(_type, _field) \ + [MT_SKU_##_type] = offsetof(struct mt76_power_limits, _field) + SKU_FIELD(CCK_1_2, cck[0]), + SKU_FIELD(CCK_55_11, cck[2]), + SKU_FIELD(OFDM_6_9, ofdm[0]), + SKU_FIELD(OFDM_12_18, ofdm[2]), + SKU_FIELD(OFDM_24_36, ofdm[4]), + SKU_FIELD(OFDM_48, ofdm[6]), + SKU_FIELD(OFDM_54, ofdm[7]), + SKU_FIELD(HT20_0_8, mcs[0][0]), + SKU_FIELD(HT20_32, ofdm[0]), + SKU_FIELD(HT20_1_2_9_10, mcs[0][1]), + SKU_FIELD(HT20_3_4_11_12, mcs[0][3]), + SKU_FIELD(HT20_5_13, mcs[0][5]), + SKU_FIELD(HT20_6_14, mcs[0][6]), + SKU_FIELD(HT20_7_15, mcs[0][7]), + SKU_FIELD(HT40_0_8, mcs[1][0]), + SKU_FIELD(HT40_32, ofdm[0]), + SKU_FIELD(HT40_1_2_9_10, mcs[1][1]), + SKU_FIELD(HT40_3_4_11_12, mcs[1][3]), + SKU_FIELD(HT40_5_13, mcs[1][5]), + SKU_FIELD(HT40_6_14, mcs[1][6]), + SKU_FIELD(HT40_7_15, mcs[1][7]), + SKU_FIELD(VHT20_0, mcs[0][0]), + SKU_FIELD(VHT20_1_2, mcs[0][1]), + SKU_FIELD(VHT20_3_4, mcs[0][3]), + SKU_FIELD(VHT20_5_6, mcs[0][5]), + SKU_FIELD(VHT20_7, mcs[0][7]), + SKU_FIELD(VHT20_8, mcs[0][8]), + SKU_FIELD(VHT20_9, mcs[0][9]), + SKU_FIELD(VHT40_0, mcs[1][0]), + SKU_FIELD(VHT40_1_2, mcs[1][1]), + SKU_FIELD(VHT40_3_4, mcs[1][3]), + SKU_FIELD(VHT40_5_6, mcs[1][5]), + SKU_FIELD(VHT40_7, mcs[1][7]), + SKU_FIELD(VHT40_8, mcs[1][8]), + SKU_FIELD(VHT40_9, mcs[1][9]), + SKU_FIELD(VHT80_0, mcs[2][0]), + SKU_FIELD(VHT80_1_2, mcs[2][1]), + SKU_FIELD(VHT80_3_4, mcs[2][3]), + SKU_FIELD(VHT80_5_6, mcs[2][5]), + SKU_FIELD(VHT80_7, mcs[2][7]), + SKU_FIELD(VHT80_8, mcs[2][8]), + SKU_FIELD(VHT80_9, mcs[2][9]), + SKU_FIELD(VHT160_0, mcs[3][0]), + SKU_FIELD(VHT160_1_2, mcs[3][1]), + SKU_FIELD(VHT160_3_4, mcs[3][3]), + SKU_FIELD(VHT160_5_6, mcs[3][5]), + SKU_FIELD(VHT160_7, mcs[3][7]), + SKU_FIELD(VHT160_8, mcs[3][8]), + SKU_FIELD(VHT160_9, mcs[3][9]), +#undef SKU_FIELD + }; tx_power = hw->conf.power_level * 2 - mt76_tx_power_nss_delta(n_chains); + + tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, + &limits, tx_power); mphy->txpower_cur = tx_power; + if (is_mt7663(mphy->dev)) { + memset(sku, tx_power, MT_SKU_4SS_DELTA + 1); + return; + } + for (i = 0; i < MT_SKU_1SS_DELTA; i++) - sku[i] = tx_power; + sku[i] = limits_array[sku_mapping[i]]; for (i = 0; i < 4; i++) { int delta = 0; @@ -2155,7 +2303,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) .center_chan2 = ieee80211_frequency_to_channel(freq2), }; - if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) @@ -2497,6 +2645,26 @@ out: return ret; } +int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev) +{ + struct { + u8 operation; + u8 count; + u8 _rsv[2]; + u8 index; + u8 enable; + __le16 etype; + } req = { + .operation = 1, + .count = 1, + .enable = 1, + .etype = cpu_to_le16(ETH_P_PAE), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS, + &req, sizeof(req), false); +} + int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable) { @@ -2557,53 +2725,6 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, sizeof(req), false); } -int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info) -{ - struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - struct 
mt7615_dev *dev = mt7615_hw_dev(hw); - struct sk_buff *skb; - int i, len = min_t(int, info->arp_addr_cnt, - IEEE80211_BSS_ARP_ADDR_LIST_LEN); - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt76_connac_arpns_tlv arp; - } req_hdr = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .arp = { - .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), - .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), - .ips_num = len, - .mode = 2, /* update */ - .option = 1, - }, - }; - - if (!mt7615_firmware_offload(dev)) - return 0; - - skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, - sizeof(req_hdr) + len * sizeof(__be32)); - if (!skb) - return -ENOMEM; - - skb_put_data(skb, &req_hdr, sizeof(req_hdr)); - for (i = 0; i < len; i++) { - u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); - - memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); - } - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD, - true); -} - int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 3874f45da9eb..98c383e400a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -176,10 +176,18 @@ struct mt7615_mcu_rxd { u8 s2d_index; }; +struct mt7615_mcu_csa_notify { + struct mt7615_mcu_rxd rxd; + + u8 omac_idx; + u8 csa_count; + u8 rsv[2]; +} __packed; + struct mt7615_mcu_rdd_report { struct mt7615_mcu_rxd rxd; - u8 idx; + u8 band_idx; u8 long_detected; u8 constant_prf_detected; u8 staggered_prf_detected; @@ -362,30 +370,6 @@ enum { BSS_INFO_MAX_NUM }; -#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_generic) + \ - sizeof(struct wtbl_rx) + \ - sizeof(struct wtbl_ht) + \ - sizeof(struct wtbl_vht) + \ - sizeof(struct wtbl_tx_ps) + \ - sizeof(struct wtbl_hdr_trans) +\ - sizeof(struct wtbl_ba) + \ - sizeof(struct wtbl_bf) + \ - sizeof(struct wtbl_smps) + \ - sizeof(struct wtbl_pn) + \ - sizeof(struct wtbl_spe)) - -#define MT7615_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct sta_rec_basic) + \ - sizeof(struct sta_rec_ht) + \ - sizeof(struct sta_rec_vht) + \ - sizeof(struct sta_rec_uapsd) + \ - sizeof(struct tlv) + \ - MT7615_WTBL_UPDATE_MAX_SIZE) - -#define MT7615_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - enum { CH_SWITCH_NORMAL = 0, CH_SWITCH_SCAN = 3, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index a7f92fa0488f..202ea235415e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -1,3 +1,6 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
*/ + #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -102,6 +105,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t) { struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet); u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev); + u32 mcu_int; mt76_wr(dev, MT_INT_MASK_CSR, 0); @@ -125,15 +129,23 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t) if (intr & MT_INT_RX_DONE(1)) napi_schedule(&dev->mt76.napi[1]); - if (intr & MT_INT_MCU_CMD) { - u32 val = mt76_rr(dev, MT_MCU_CMD); + if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD))) + return; - if (val & MT_MCU_CMD_ERROR_MASK) { - dev->reset_state = val; - ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); - wake_up(&dev->reset_wait); - } + if (is_mt7663(&dev->mt76)) { + mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS); + mcu_int &= MT7663_MCU_CMD_ERROR_MASK; + } else { + mcu_int = mt76_rr(dev, MT_MCU_CMD); + mcu_int &= MT_MCU_CMD_ERROR_MASK; } + + if (!mcu_int) + return; + + dev->reset_state = mcu_int; + ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); + wake_up(&dev->reset_wait); } static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr) @@ -178,6 +190,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, + .token_size = MT7615_TOKEN_SIZE, .tx_prepare_skb = mt7615_tx_prepare_skb, .tx_complete_skb = mt7615_tx_complete_skb, .rx_skb = mt7615_queue_rx_skb, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 491841bc6291..989f05ed4377 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -133,11 +133,11 @@ struct mt7615_vif { }; struct mib_stats { - u16 ack_fail_cnt; - u16 fcs_err_cnt; - u16 rts_cnt; - u16 rts_retries_cnt; - u16 ba_miss_cnt; + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; unsigned long aggr_per; }; @@ -168,7 +168,7 @@ struct mt7615_phy { u8 rdd_state; int dfs_state; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; struct mib_stats mib; @@ -263,9 +263,6 @@ struct mt7615_dev { bool flash_eeprom; bool dbdc_support; - spinlock_t token_lock; - struct idr token; - u8 fw_ver; struct work_struct rate_work; @@ -376,6 +373,7 @@ int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev, enum nl80211_band band); int mt7615_wait_pdma_busy(struct mt7615_dev *dev); int mt7615_dma_init(struct mt7615_dev *dev); +void mt7615_dma_start(struct mt7615_dev *dev); void mt7615_dma_cleanup(struct mt7615_dev *dev); int mt7615_mcu_init(struct mt7615_dev *dev); bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev); @@ -408,11 +406,6 @@ static inline bool is_mt7615(struct mt76_dev *dev) return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611; } -static inline bool is_mt7663(struct mt76_dev *dev) -{ - return mt76_chip(dev) == 0x7663; -} - static inline bool is_mt7611(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7611; @@ -512,6 +505,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); +void mt7615_tx_worker(struct mt76_worker *w); void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); void mt7615_tx_token_put(struct mt7615_dev *dev); void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, @@ -524,6 +518,10 @@ void mt7615_mac_sta_remove(struct mt76_dev 
*mdev, struct ieee80211_vif *vif, void mt7615_mac_work(struct work_struct *work); void mt7615_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *txwi); +int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev); int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val); int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, const struct mt7615_dfs_pulse *pulse); @@ -549,14 +547,13 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, bool enable); int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable); -int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info); int __mt7663_load_firmware(struct mt7615_dev *dev); u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset); void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val); void mt7615_coredump_work(struct work_struct *work); +void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en); + /* usb */ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index 71487f532f36..11f169cdd603 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -13,9 +13,9 @@ #include "mcu.h" static const struct pci_device_id mt7615_pci_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7615) }, - { PCI_DEVICE(0x14c3, 0x7663) }, - { PCI_DEVICE(0x14c3, 0x7611) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7615) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7663) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7611) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c index 72395925ddee..ec8ec1a2033f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -40,13 +40,16 @@ static int mt7615_init_hardware(struct mt7615_dev *dev) mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); INIT_WORK(&dev->mcu_work, mt7615_pci_init_work); - spin_lock_init(&dev->token_lock); - idr_init(&dev->token); - ret = mt7615_eeprom_init(dev, addr); if (ret < 0) return ret; + if (is_mt7663(&dev->mt76)) { + /* Reset RGU */ + mt76_clear(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1)); + mt76_set(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1)); + } + ret = mt7615_dma_init(dev); if (ret) return ret; @@ -76,7 +79,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev, mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); dev = container_of(mt76, struct mt7615_dev, mt76); - if (test_bit(MT76_STATE_PM, &mt76->phy.state)) + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) return; val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | @@ -94,6 +97,8 @@ mt7615_led_set_config(struct led_classdev *led_cdev, val |= MT_LED_CTRL_POLARITY(mt76->led_pin); addr = mt7615_reg_map(dev, MT_LED_CTRL); mt76_wr(dev, addr, val); + + mt76_connac_pm_unref(&dev->pm); } static int @@ -126,6 +131,7 @@ int mt7615_register_device(struct mt7615_dev *dev) int ret; mt7615_init_device(dev); + INIT_WORK(&dev->reset_work, mt7615_mac_reset_work); /* init led callbacks */ if (IS_ENABLED(CONFIG_MT76_LEDS)) { @@ -163,10 +169,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev) mt76_unregister_device(&dev->mt76); if (mcu_running) 
mt7615_mcu_exit(dev); - mt7615_dma_cleanup(dev); mt7615_tx_token_put(dev); - + mt7615_dma_cleanup(dev); tasklet_disable(&dev->irq_tasklet); mt76_free_device(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 1b4cb145f38e..d7cbef752f9f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -37,9 +37,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID; - spin_lock_bh(&dev->token_lock); - t = idr_remove(&dev->token, token); - spin_unlock_bh(&dev->token_lock); + t = mt76_token_put(mdev, token); e->skb = t ? t->skb : NULL; } @@ -161,9 +159,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); t->skb = tx_info->skb; - spin_lock_bh(&dev->token_lock); - id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC); - spin_unlock_bh(&dev->token_lock); + id = mt76_token_get(mdev, &t); if (id < 0) return id; @@ -181,3 +177,178 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, return 0; } + +void mt7615_dma_reset(struct mt7615_dev *dev) +{ + int i; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + usleep_range(1000, 2000); + + for (i = 0; i < __MT_TXQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_reset(dev, i); + + mt76_tx_status_check(&dev->mt76, NULL, true); + + mt7615_dma_start(dev); +} +EXPORT_SYMBOL_GPL(mt7615_dma_reset); + +static void +mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event) +{ + u32 reg = MT_MCU_INT_EVENT; + + if (is_mt7663(&dev->mt76)) + reg = MT7663_MCU_INT_EVENT; + + mt76_wr(dev, reg, event); + + mt7622_trigger_hif_int(dev, true); + mt7622_trigger_hif_int(dev, false); +} + +static bool +mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state) +{ + bool ret; + + ret = wait_event_timeout(dev->reset_wait, + (READ_ONCE(dev->reset_state) & state), + MT7615_RESET_TIMEOUT); + WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); + return ret; +} + +static void +mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct ieee80211_hw *hw = priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + mt7615_mcu_add_beacon(dev, hw, vif, + vif->bss_conf.enable_beacon); + break; + default: + break; + } +} + +static void +mt7615_update_beacons(struct mt7615_dev *dev) +{ + ieee80211_iterate_active_interfaces(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_update_vif_beacon, dev->mt76.hw); + + if (!dev->mt76.phy2) + return; + + ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_update_vif_beacon, dev->mt76.phy2->hw); +} + +void mt7615_mac_reset_work(struct work_struct *work) +{ + struct mt7615_phy *phy2; + struct mt76_phy *ext_phy; + struct mt7615_dev *dev; + + dev = container_of(work, struct mt7615_dev, reset_work); + ext_phy = dev->mt76.phy2; + phy2 = ext_phy ? 
ext_phy->priv : NULL; + + if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) + return; + + ieee80211_stop_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_stop_queues(ext_phy->hw); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + cancel_delayed_work_sync(&dev->mphy.mac_work); + del_timer_sync(&dev->phy.roc_timer); + cancel_work_sync(&dev->phy.roc_work); + if (phy2) { + set_bit(MT76_RESET, &phy2->mt76->state); + cancel_delayed_work_sync(&phy2->mt76->mac_work); + del_timer_sync(&phy2->roc_timer); + cancel_work_sync(&phy2->roc_work); + } + + /* lock/unlock all queues to ensure that no tx is pending */ + mt76_txq_schedule_all(&dev->mphy); + if (ext_phy) + mt76_txq_schedule_all(ext_phy); + + mt76_worker_disable(&dev->mt76.tx_worker); + napi_disable(&dev->mt76.napi[0]); + napi_disable(&dev->mt76.napi[1]); + napi_disable(&dev->mt76.tx_napi); + + mt7615_mutex_acquire(dev); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED); + + if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { + mt7615_dma_reset(dev); + + mt7615_tx_token_put(dev); + idr_init(&dev->mt76.token); + + mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT); + mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); + } + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + clear_bit(MT76_RESET, &dev->mphy.state); + if (phy2) + clear_bit(MT76_RESET, &phy2->mt76->state); + + mt76_worker_enable(&dev->mt76.tx_worker); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + + napi_enable(&dev->mt76.napi[0]); + napi_schedule(&dev->mt76.napi[0]); + + napi_enable(&dev->mt76.napi[1]); + napi_schedule(&dev->mt76.napi[1]); + + ieee80211_wake_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_wake_queues(ext_phy->hw); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE); + mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + + mt7615_update_beacons(dev); + + mt7615_mutex_release(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, + MT7615_WATCHDOG_TIME); + if (phy2) + ieee80211_queue_delayed_work(ext_phy->hw, + &phy2->mt76->mac_work, + MT7615_WATCHDOG_TIME); + +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 6e5db015b32c..63c081bb04d0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -61,6 +61,11 @@ enum mt7615_reg_base { #define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19) #define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2]) +#define MT_MCU_CIRQ_BASE 0xc0000 +#define MT_MCU_CIRQ(ofs) (MT_MCU_CIRQ_BASE + (ofs)) + +#define MT_MCU_CIRQ_IRQ_SEL(n) MT_MCU_CIRQ((n) << 2) + #define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs)) #define MT_HIF_RST MT_HIF(0x100) #define MT_HIF_LOGIC_RST_N BIT(4) @@ -88,6 +93,10 @@ enum mt7615_reg_base { #define MT_CFG_LPCR_HOST_FW_OWN BIT(0) #define MT_CFG_LPCR_HOST_DRV_OWN BIT(1) +#define MT_MCU2HOST_INT_STATUS MT_HIF(0x1f0) +#define MT_MCU2HOST_INT_ENABLE MT_HIF(0x1f4) + +#define MT7663_MCU_INT_EVENT MT_HIF(0x108) #define MT_MCU_INT_EVENT MT_HIF(0x1f8) #define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0) #define MT_MCU_INT_EVENT_PDMA_INIT BIT(1) @@ -102,6 +111,7 @@ enum mt7615_reg_base { #define MT_INT_RX_DONE_ALL GENMASK(1, 0) #define MT_INT_TX_DONE_ALL GENMASK(19, 4) #define MT_INT_TX_DONE(_n) BIT((_n) + 4) +#define MT7663_INT_MCU_CMD BIT(29) #define MT_INT_MCU_CMD 
BIT(30) #define MT_WPDMA_GLO_CFG MT_HIF(0x208) @@ -138,6 +148,7 @@ enum mt7615_reg_base { #define MT_MCU_CMD_PDMA_ERROR BIT(27) #define MT_MCU_CMD_PCIE_ERROR BIT(28) #define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24)) +#define MT7663_MCU_CMD_ERROR_MASK GENMASK(5, 2) #define MT_TX_RING_BASE MT_HIF(0x300) #define MT_RX_RING_BASE MT_HIF(0x400) @@ -368,7 +379,9 @@ enum mt7615_reg_base { #define MT_DMA_DCR0 MT_WF_DMA(0x000) #define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2) +#define MT_DMA_DCR0_DAMSDU_EN BIT(16) #define MT_DMA_DCR0_RX_VEC_DROP BIT(17) +#define MT_DMA_DCR0_RX_HDR_TRANS_EN BIT(19) #define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40) #define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2) @@ -447,9 +460,10 @@ enum mt7615_reg_base { #define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n)) -#define MT_LPON_T0CR MT_LPON(0x010) -#define MT_LPON_T0CR_MODE GENMASK(1, 0) -#define MT_LPON_T0CR_WRITE BIT(0) +#define MT_LPON_TCR0(_n) MT_LPON(0x010 + ((_n) * 4)) +#define MT_LPON_TCR2(_n) MT_LPON(0x0f8 + ((_n) - 2) * 4) +#define MT_LPON_TCR_MODE GENMASK(1, 0) +#define MT_LPON_TCR_WRITE BIT(0) #define MT_LPON_UTTR0 MT_LPON(0x018) #define MT_LPON_UTTR1 MT_LPON(0x01c) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c index 9fb506f2ace6..4393dd21ebbb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c @@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0; bool mcu = q == dev->q_mcu[MT_MCUQ_WM]; struct mt76_sdio *sdio = &dev->sdio; + u8 pad; qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid; while (q->first != q->head) { struct mt76_queue_entry *e = &q->entry[q->first]; struct sk_buff *iter; + smp_rmb(); + if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) { __skb_put_zero(e->skb, 4); err = __mt7663s_xmit_queue(dev, e->skb->data, @@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) goto next; } - if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ) + pad = roundup(e->skb->len, 4) - e->skb->len; + if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ) break; if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz, @@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) len += iter->len; nframes++; } + + if (unlikely(pad)) { + memset(sdio->xmit_buf[qid] + len, 0, pad); + len += pad; + } next: q->first = (q->first + 1) % q->ndesc; e->done = true; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c index 9aa5183c7a56..be9a69fe1b38 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c @@ -40,10 +40,8 @@ static int mt7622_wmac_probe(struct platform_device *pdev) return irq; mem_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mem_base)) { - dev_err(&pdev->dev, "Failed to get memory resource\n"); + if (IS_ERR(mem_base)) return PTR_ERR(mem_base); - } return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 203256862dfd..f8d3673c2cae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -67,6 +67,7 @@ static int 
mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, struct mt7615_rate_desc *rate = &wrd->rate; struct mt7615_sta *sta = wrd->sta; u32 w5, w27, addr, val; + u16 idx; lockdep_assert_held(&dev->mt76.mutex); @@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + idx = sta->vif->mt76.omac_idx; + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */ val = mt76_rr(dev, MT_LPON_UTTR0); sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index 0d58606391b0..6c889b90fd12 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -53,11 +53,25 @@ struct mt76_connac_pm { } tx_q[IEEE80211_NUM_ACS]; struct work_struct wake_work; - struct completion wake_cmpl; + wait_queue_head_t wait; + + struct { + spinlock_t lock; + u32 count; + } wake; + struct mutex mutex; struct delayed_work ps_work; unsigned long last_activity; unsigned long idle_timeout; + + struct { + unsigned long last_wake_event; + unsigned long awake_time; + unsigned long last_doze_event; + unsigned long doze_time; + unsigned int lp_wake; + } stats; }; struct mt76_connac_coredump { @@ -73,12 +87,55 @@ static inline bool is_mt7921(struct mt76_dev *dev) return mt76_chip(dev) == 0x7961; } +static inline bool is_mt7663(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7663; +} + int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm); void mt76_connac_power_save_sched(struct mt76_phy *phy, struct mt76_connac_pm *pm); void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm, struct mt76_wcid *wcid); +static inline bool +mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm) +{ + bool ret = false; + + spin_lock_bh(&pm->wake.lock); + if (test_bit(MT76_STATE_PM, &phy->state)) + goto out; + + pm->wake.count++; + ret = true; +out: + spin_unlock_bh(&pm->wake.lock); + + return ret; +} + +static inline void +mt76_connac_pm_unref(struct mt76_connac_pm *pm) +{ + spin_lock_bh(&pm->wake.lock); + pm->wake.count--; + pm->last_activity = jiffies; + spin_unlock_bh(&pm->wake.lock); +} + +static inline bool +mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm) +{ + bool ret; + + spin_lock_bh(&pm->wake.lock); + ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state); + spin_unlock_bh(&pm->wake.lock); + + return ret; +} + static inline void mt76_connac_mutex_acquire(struct mt76_dev *dev, struct mt76_connac_pm *pm) __acquires(&dev->mutex) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index c5f5037f5757..6f180c92d413 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -13,17 +13,14 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm) if (!mt76_is_mmio(dev)) return 0; + cancel_delayed_work_sync(&pm->ps_work); if (!test_bit(MT76_STATE_PM, &phy->state)) return 0; - if (test_bit(MT76_HW_SCANNING, &phy->state) || - test_bit(MT76_HW_SCHED_SCANNING, &phy->state)) - return 0; - - if (queue_work(dev->wq, &pm->wake_work)) - reinit_completion(&pm->wake_cmpl); - - if 
(!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ)) { + queue_work(dev->wq, &pm->wake_work); + if (!wait_event_timeout(pm->wait, + !test_bit(MT76_STATE_PM, &phy->state), + 3 * HZ)) { ieee80211_wake_queues(phy->hw); return -ETIMEDOUT; } @@ -40,17 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy, if (!mt76_is_mmio(dev)) return; - if (!pm->enable || !test_bit(MT76_STATE_RUNNING, &phy->state)) + if (!pm->enable) return; pm->last_activity = jiffies; - if (test_bit(MT76_HW_SCANNING, &phy->state) || - test_bit(MT76_HW_SCHED_SCANNING, &phy->state)) - return; - - if (!test_bit(MT76_STATE_PM, &phy->state)) + if (!test_bit(MT76_STATE_PM, &phy->state)) { + cancel_delayed_work(&phy->mac_work); queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout); + } } EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 6cbccfb05f8b..fe0ab5e5ff81 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -287,7 +287,7 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid, &hdr.wlan_idx_hi); if (!nskb) { nskb = mt76_mcu_msg_alloc(dev, NULL, - MT76_CONNAC_WTBL_UPDATE_BA_SIZE); + MT76_CONNAC_WTBL_UPDATE_MAX_SIZE); if (!nskb) return ERR_PTR(-ENOMEM); @@ -392,6 +392,21 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif, uapsd->max_sp = sta->max_sp; } +void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, + struct mt76_wcid *wcid, + void *sta_wtbl, void *wtbl_tlv) +{ + struct wtbl_hdr_trans *htr; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, + sizeof(*htr), + wtbl_tlv, sta_wtbl); + htr = (struct wtbl_hdr_trans *)tlv; + htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv); + void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, @@ -496,7 +511,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) cap |= STA_REC_HE_CAP_OM; - if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU) + if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU) cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU; if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) @@ -655,7 +670,8 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_sta *sta, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + u8 rcpi) { struct cfg80211_chan_def *chandef = &mphy->chandef; enum nl80211_band band = chandef->chan->band; @@ -704,6 +720,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, phy = (struct sta_rec_phy *)tlv; phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta); phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); + phy->rcpi = rcpi; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); ra_info = (struct sta_rec_ra_info *)tlv; @@ -808,40 +825,42 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv); int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct mt76_wcid *wcid, - bool enable, int cmd) + struct mt76_sta_cmd_info 
*info) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; struct mt76_dev *dev = phy->dev; struct wtbl_req_hdr *wtbl_hdr; struct tlv *sta_wtbl; struct sk_buff *skb; - skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid); + skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid); if (IS_ERR(skb)) return PTR_ERR(skb); - mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable); - if (enable && sta) - mt76_connac_mcu_sta_tlv(phy, skb, sta, vif); + mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable); + if (info->enable && info->sta) + mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif, + info->rcpi); sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid, WTBL_RESET_AND_SET, sta_wtbl, &skb); - if (enable) { - mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl, + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + if (info->enable) { + mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif, + info->sta, sta_wtbl, wtbl_hdr); - if (sta) - mt76_connac_mcu_wtbl_ht_tlv(dev, skb, sta, sta_wtbl, - wtbl_hdr); + if (info->sta) + mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta, + sta_wtbl, wtbl_hdr); } - return mt76_mcu_skb_send_msg(dev, skb, cmd, true); + return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd); @@ -946,6 +965,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP: basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP); break; @@ -1195,6 +1215,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, .center_chan = ieee80211_frequency_to_channel(freq1), .center_chan2 = ieee80211_frequency_to_channel(freq2), .tx_streams = hweight8(phy->antenna_mask), + .ht_op_info = 4, /* set HT 40M allowed */ .rx_streams = phy->chainmask, .short_st = true, }, @@ -1287,6 +1308,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, case NL80211_CHAN_WIDTH_20: default: rlm_req.rlm.bw = CMD_CBW_20MHZ; + rlm_req.rlm.ht_op_info = 0; break; } @@ -1306,7 +1328,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, { struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct cfg80211_scan_request *sreq = &scan_req->req; - int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME; + int n_ssids = 0, err, i, duration; int ext_channels_num = max_t(int, sreq->n_channels - 32, 0); struct ieee80211_channel **scan_list = sreq->channels; struct mt76_dev *mdev = phy->dev; @@ -1343,6 +1365,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, req->ssid_type_ext = n_ssids ? BIT(0) : 0; req->ssids_num = n_ssids; + duration = is_mt7921(phy->dev) ? 
0 : MT76_CONNAC_SCAN_CHANNEL_TIME; /* increase channel time for passive scan */ if (!sreq->n_ssids) duration *= 2; @@ -1368,11 +1391,14 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, req->ies_len = cpu_to_le16(sreq->ie_len); } + if (is_mt7921(phy->dev)) + req->scan_func |= SCAN_FUNC_SPLIT_SCAN; + memcpy(req->bssid, sreq->bssid, ETH_ALEN); if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { get_random_mask_addr(req->random_mac, sreq->mac_addr, sreq->mac_addr_mask); - req->scan_func = 1; + req->scan_func |= SCAN_FUNC_RANDOM_MAC; } err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false); @@ -1433,10 +1459,13 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, req->version = 1; req->seq_num = mvif->scan_seq_num | ext_phy << 7; - if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - get_random_mask_addr(req->random_mac, sreq->mac_addr, + if (is_mt7663(phy->dev) && + (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) { + get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr, sreq->mac_addr_mask); req->scan_func = 1; + } else if (is_mt7921(phy->dev)) { + req->mt7921.bss_idx = mvif->idx; } req->ssids_num = sreq->n_ssids; @@ -1499,14 +1528,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable); int mt76_connac_mcu_chip_config(struct mt76_dev *dev) { - struct { - __le16 id; - u8 type; - u8 resp_type; - __le16 data_size; - __le16 resv; - u8 data[320]; - } req = { + struct mt76_connac_config req = { .resp_type = 0, }; @@ -1517,6 +1539,19 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev) } EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config); +int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable) +{ + struct mt76_connac_config req = { + .resp_type = 0, + }; + + snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable); + + return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req), + false); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep); + void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, struct mt76_connac_coredump *coredump) { @@ -1531,6 +1566,181 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event); +static void +mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, + struct mt76_power_limits *limits, + enum nl80211_band band) +{ + int max_power = is_mt7921(dev) ? 127 : 63; + int i, offset = sizeof(limits->cck); + + memset(sku, max_power, MT_SKU_POWER_LIMIT); + + if (band == NL80211_BAND_2GHZ) { + /* cck */ + memcpy(sku, limits->cck, sizeof(limits->cck)); + } + + /* ofdm */ + memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm)); + offset += sizeof(limits->ofdm); + + /* ht */ + for (i = 0; i < 2; i++) { + memcpy(&sku[offset], limits->mcs[i], 8); + offset += 8; + } + sku[offset++] = limits->mcs[0][0]; + + /* vht */ + for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) { + memcpy(&sku[offset], limits->mcs[i], + ARRAY_SIZE(limits->mcs[i])); + offset += 12; + } + + if (!is_mt7921(dev)) + return; + + /* he */ + for (i = 0; i < ARRAY_SIZE(limits->ru); i++) { + memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i])); + offset += ARRAY_SIZE(limits->ru[i]); + } +} + +static int +mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, + enum nl80211_band band) +{ + struct mt76_dev *dev = phy->dev; + int sku_len, batch_len = is_mt7921(dev) ? 
8 : 16; + static const u8 chan_list_2ghz[] = { + 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14 + }; + static const u8 chan_list_5ghz[] = { + 36, 38, 40, 42, 44, 46, 48, + 50, 52, 54, 56, 58, 60, 62, + 64, 100, 102, 104, 106, 108, 110, + 112, 114, 116, 118, 120, 122, 124, + 126, 128, 132, 134, 136, 138, 140, + 142, 144, 149, 151, 153, 155, 157, + 159, 161, 165 + }; + struct mt76_connac_sku_tlv sku_tlbv; + int i, n_chan, batch_size, idx = 0; + struct mt76_power_limits limits; + const u8 *ch_list; + + sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92; + + if (band == NL80211_BAND_2GHZ) { + n_chan = ARRAY_SIZE(chan_list_2ghz); + ch_list = chan_list_2ghz; + } else { + n_chan = ARRAY_SIZE(chan_list_5ghz); + ch_list = chan_list_5ghz; + } + batch_size = DIV_ROUND_UP(n_chan, batch_len); + + for (i = 0; i < batch_size; i++) { + bool last_msg = i == batch_size - 1; + int num_ch = last_msg ? n_chan % batch_len : batch_len; + struct mt76_connac_tx_power_limit_tlv tx_power_tlv = { + .band = band == NL80211_BAND_2GHZ ? 1 : 2, + .n_chan = num_ch, + .last_msg = last_msg, + }; + struct sk_buff *skb; + int j, err, msg_len; + + msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv); + skb = mt76_mcu_msg_alloc(dev, NULL, msg_len); + if (!skb) + return -ENOMEM; + + BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2)); + memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2)); + + skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv)); + for (j = 0; j < num_ch; j++, idx++) { + struct ieee80211_channel chan = { + .hw_value = ch_list[idx], + .band = band, + }; + + mt76_get_rate_power_limits(phy, &chan, &limits, 127); + + sku_tlbv.channel = ch_list[idx]; + mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit, + &limits, band); + skb_put_data(skb, &sku_tlbv, sku_len); + } + + err = mt76_mcu_skb_send_msg(dev, skb, + MCU_CMD_SET_RATE_TX_POWER, false); + if (err < 0) + return err; + } + + return 0; +} + +int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy) +{ + int err; + + err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ); + if (err < 0) + return err; + + return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower); + +int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, + struct mt76_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct sk_buff *skb; + int i, len = min_t(int, info->arp_addr_cnt, + IEEE80211_BSS_ARP_ADDR_LIST_LEN); + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_arpns_tlv arp; + } req_hdr = { + .hdr = { + .bss_idx = vif->idx, + }, + .arp = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), + .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), + .ips_num = len, + .mode = 2, /* update */ + .option = 1, + }, + }; + + skb = mt76_mcu_msg_alloc(dev, NULL, + sizeof(req_hdr) + len * sizeof(__be32)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + for (i = 0; i < len; i++) { + u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); + + memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); + } + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter); + #ifdef CONFIG_PM const struct wiphy_wowlan_support mt76_connac_wowlan_support = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index c1e1df5f7cd7..a1096861d04a 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -210,7 +210,7 @@ struct wtbl_hdr_trans { __le16 len; u8 to_ds; u8 from_ds; - u8 disable_rx_trans; + u8 no_rx_trans; u8 rsv; } __packed; @@ -304,9 +304,6 @@ struct wtbl_raw { sizeof(struct tlv) + \ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE) -#define MT76_CONNAC_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - enum { STA_REC_BASIC, STA_REC_RA, @@ -365,6 +362,9 @@ enum { #define NETWORK_IBSS BIT(18) #define NETWORK_WDS BIT(21) +#define SCAN_FUNC_RANDOM_MAC BIT(0) +#define SCAN_FUNC_SPLIT_SCAN BIT(5) + #define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA) #define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA) #define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P) @@ -564,6 +564,7 @@ enum { MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca, MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5, MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd, + MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0, }; enum { @@ -759,11 +760,19 @@ struct mt76_connac_sched_scan_req { u8 channel_type; u8 channels_num; u8 intervals_num; - u8 scan_func; /* BIT(0) eable random mac address */ + u8 scan_func; /* MT7663: BIT(0) eable random mac address */ struct mt76_connac_mcu_scan_channel channels[64]; __le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL]; - u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */ - u8 pad2[58]; + union { + struct { + u8 random_mac[ETH_ALEN]; + u8 pad2[58]; + } mt7663; + struct { + u8 bss_idx; + u8 pad2[63]; + } mt7921; + }; } __packed; struct mt76_connac_sched_scan_done { @@ -876,6 +885,48 @@ struct mt76_connac_suspend_tlv { u8 pad[5]; } __packed; +struct mt76_sta_cmd_info { + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + + struct ieee80211_vif *vif; + + bool enable; + int cmd; + u8 rcpi; +}; + +#define MT_SKU_POWER_LIMIT 161 + +struct mt76_connac_sku_tlv { + u8 channel; + s8 pwr_limit[MT_SKU_POWER_LIMIT]; +} __packed; + +struct mt76_connac_tx_power_limit_tlv { + /* DW0 - common info*/ + u8 ver; + u8 pad0; + __le16 len; + /* DW1 - cmd hint */ + u8 n_chan; /* # channel */ + u8 band; /* 2.4GHz - 5GHz */ + u8 last_msg; + u8 pad1; + /* DW3 */ + u8 alpha2[4]; /* regulatory_request.alpha2 */ + u8 pad2[32]; +} __packed; + +struct mt76_connac_config { + __le16 id; + u8 type; + u8 resp_type; + __le16 data_size; + __le16 resv; + u8 data[320]; +} __packed; + #define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id) #define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id) @@ -917,9 +968,13 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta, void *sta_wtbl, void *wtbl_tlv); +void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, + struct mt76_wcid *wcid, + void *sta_wtbl, void *wtbl_tlv); void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_sta *sta, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + u8 rcpi); void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_sta *sta, void *sta_wtbl, void *wtbl_tlv); @@ -942,10 +997,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, struct mt76_wcid *wcid, bool enable); int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct mt76_wcid *wcid, - bool enable, int cmd); + struct mt76_sta_cmd_info *info); void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif 
*vif); int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band); @@ -967,6 +1019,9 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy, struct ieee80211_vif *vif, bool enable); +int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, + struct mt76_vif *vif, + struct ieee80211_bss_conf *info); int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *key); @@ -974,6 +1029,8 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend); void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); int mt76_connac_mcu_chip_config(struct mt76_dev *dev); +int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable); void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, struct mt76_connac_coredump *coredump); +int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy); #endif /* __MT76_CONNAC_MCU_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 02d0aa0b815e..5847f943e8da 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -221,9 +221,9 @@ mt76x0e_remove(struct pci_dev *pdev) } static const struct pci_device_id mt76x0e_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7610) }, - { PCI_DEVICE(0x14c3, 0x7630) }, - { PCI_DEVICE(0x14c3, 0x7650) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7650) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index a593a7796d23..f2b2fa733845 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -32,7 +32,8 @@ static struct usb_device_id mt76x0_device_table[] = { { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. 
ac Stick */ - { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP */ + { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP_US_v1 */ + { USB_DEVICE(0x2357, 0x010b) }, /* TP-LINK T2UHP_UN_v1 */ /* TP-LINK Archer T1U */ { USB_DEVICE(0x2357, 0x0105), .driver_info = 1, }, /* MT7630U */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 771bad60e1bc..0da37867cb64 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -770,6 +770,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, void *rxi) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_hdr *hdr; struct mt76x02_rxwi *rxwi = rxi; struct mt76x02_sta *sta; u32 rxinfo = le32_to_cpu(rxwi->rxinfo); @@ -864,7 +865,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, status->freq = dev->mphy.chandef.chan->center_freq; status->band = dev->mphy.chandef.chan->band; - status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); + hdr = (struct ieee80211_hdr *)skb->data; + status->qos_ctl = *ieee80211_get_qos_ctl(hdr); status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); return mt76x02_mac_process_rate(dev, status, rate); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index 4aa5c36afeaf..75978820a260 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -17,9 +17,8 @@ int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd, u32 *rxfce; if (!skb) { - dev_err(mdev->dev, - "MCU message %d (seq %d) timed out\n", cmd, - seq); + dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n", + abs(cmd), seq); dev->mcu_timeout = 1; return -ETIMEDOUT; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index e7a46ac97f51..b50084bbe83d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -226,11 +226,11 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) if (ret) return ret; - ret = mt76_init_queues(dev); + ret = mt76_init_queues(dev, mt76_dma_rx_poll); if (ret) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt76x02_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); @@ -472,6 +472,8 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) mt76_queue_rx_reset(dev, i); } + mt76_tx_status_check(&dev->mt76, NULL, true); + mt76x02_mac_start(dev); if (dev->ed_monitor) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index ab671e21f882..02db5d66735d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -EOPNOTSUPP; + /* MT76x0 GTK offloading does not work with more than one VIF */ + if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL; wcid = msta ? 
&msta->wcid : &mvif->group_wcid; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index ecaf85b483ac..adf288e50e21 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -10,9 +10,9 @@ #include "mt76x2.h" static const struct pci_device_id mt76x2e_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7662) }, - { PCI_DEVICE(0x14c3, 0x7612) }, - { PCI_DEVICE(0x14c3, 0x7602) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7662) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7612) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7602) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile index cc2054dffa98..40c8061787e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_MT7915E) += mt7915e.o mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \ - debugfs.o + debugfs.o mmio.o mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 77dcd71e49a5..6a8ddeeecbe9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy, range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i)); for (i = 0; i < ARRAY_SIZE(bound); i++) - bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1; + bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1; seq_printf(file, "\nPhy %d\n", ext_phy); @@ -192,7 +192,7 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s) } static int -mt7915_tx_stats_read(struct seq_file *file, void *data) +mt7915_tx_stats_show(struct seq_file *file, void *data) { struct mt7915_dev *dev = file->private; int stat[8], i, n; @@ -222,19 +222,7 @@ mt7915_tx_stats_read(struct seq_file *file, void *data) return 0; } -static int -mt7915_tx_stats_open(struct inode *inode, struct file *f) -{ - return single_open(f, mt7915_tx_stats_read, inode->i_private); -} - -static const struct file_operations fops_tx_stats = { - .open = mt7915_tx_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats); static int mt7915_read_temperature(struct seq_file *s, void *data) { @@ -311,8 +299,7 @@ mt7915_queues_read(struct seq_file *s, void *data) } static void -mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta, - s8 txpower_cur, int band) +mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy) { static const char * const sku_group_name[] = { "CCK", "OFDM", "HT20", "HT40", @@ -320,24 +307,54 @@ mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta, "RU26", "RU52", "RU106", "RU242/SU20", "RU484/SU40", "RU996/SU80", "RU2x996/SU160" }; - s8 txpower[MT7915_SKU_RATE_NUM]; + struct mt7915_dev *dev = dev_get_drvdata(s->private); + bool ext_phy = phy != &dev->phy; + u32 reg_base; int i, idx = 0; - for (i = 0; i < MT7915_SKU_RATE_NUM; i++) - txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2); + if (!phy) + return; - for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) { - const struct sku_group *sku = &mt7915_sku_groups[i]; - u32 offset = sku->offset[band]; + reg_base = MT_TMAC_FP0R0(ext_phy); + seq_printf(s, "\nBand %d\n", ext_phy); + + for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { + 
u8 cnt, mcs_num = mt7915_sku_group_len[i]; + s8 txpower[12]; + int j; + + if (i == SKU_HT_BW20 || i == SKU_HT_BW40) { + mcs_num = 8; + } else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) { + mcs_num = 10; + } else if (i == SKU_HE_RU26) { + reg_base = MT_TMAC_FP0R18(ext_phy); + idx = 0; + } - if (!offset) { - idx += sku->len; - continue; + for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) { + u32 val; + + if (i == SKU_VHT_BW160 && idx == 60) { + reg_base = MT_TMAC_FP0R15(ext_phy); + idx = 0; + } + + val = mt76_rr(dev, reg_base + (idx / 4) * 4); + + if (idx && idx % 4) + val >>= (idx % 4) * 8; + + while (val > 0 && cnt < mcs_num) { + s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val); + + txpower[cnt++] = pwr; + val >>= 8; + idx++; + } } - mt76_seq_puts_array(s, sku_group_name[i], - txpower + idx, sku->len); - idx += sku->len; + mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num); } } @@ -345,24 +362,9 @@ static int mt7915_read_rate_txpower(struct seq_file *s, void *data) { struct mt7915_dev *dev = dev_get_drvdata(s->private); - struct mt76_phy *mphy = &dev->mphy; - enum nl80211_band band = mphy->chandef.chan->band; - s8 *delta = dev->rate_power[band]; - s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX]; - - seq_puts(s, "Band 0:\n"); - mt7915_puts_rate_txpower(s, delta, txpower_base, band); - - if (dev->mt76.phy2) { - mphy = dev->mt76.phy2; - band = mphy->chandef.chan->band; - delta = dev->rate_power[band]; - txpower_base = mphy->txpower_cur - - delta[MT7915_SKU_MAX_DELTA_IDX]; - - seq_puts(s, "Band 1:\n"); - mt7915_puts_rate_txpower(s, delta, txpower_base, band); - } + + mt7915_puts_rate_txpower(s, &dev->phy); + mt7915_puts_rate_txpower(s, mt7915_ext_phy(dev)); return 0; } @@ -379,7 +381,7 @@ int mt7915_init_debugfs(struct mt7915_dev *dev) mt7915_queues_read); debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, mt7915_queues_acq); - debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats); + debugfs_create_file("tx_stats", 0400, dir, dev, &mt7915_tx_stats_fops); debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); debugfs_create_file("implicit_txbf", 0600, dir, dev, &fops_implicit_txbf); @@ -412,7 +414,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL, mt7915_sta_fixed_rate_set, "%llx\n"); static int -mt7915_sta_stats_read(struct seq_file *s, void *data) +mt7915_sta_stats_show(struct seq_file *s, void *data) { struct ieee80211_sta *sta = s->private; struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; @@ -455,24 +457,12 @@ mt7915_sta_stats_read(struct seq_file *s, void *data) return 0; } -static int -mt7915_sta_stats_open(struct inode *inode, struct file *f) -{ - return single_open(f, mt7915_sta_stats_read, inode->i_private); -} - -static const struct file_operations fops_sta_stats = { - .open = mt7915_sta_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(mt7915_sta_stats); void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate); - debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats); + debugfs_create_file("stats", 0400, dir, sta, &mt7915_sta_stats_fops); } #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index bf51304a770b..11d0b760abd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -110,121 +110,13 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev) __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE); } -static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) -{ - static const struct { - u32 phys; - u32 mapped; - u32 size; - } fixed_map[] = { - { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ - { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ - { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ - { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */ - { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */ - { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */ - { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */ - { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */ - { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */ - { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */ - { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */ - { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */ - { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */ - { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ - { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ - { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ - { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ - { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ - { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ - { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ - { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ - { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ - { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ - { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ - { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ - { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ - { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ - { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ - { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ - { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ - { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ - { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ - { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ - { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ - { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ - { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ - { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ - { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ - }; - int i; - - if (addr < 0x100000) - return addr; - - for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { - u32 ofs; - - if (addr < fixed_map[i].phys) - continue; - - ofs = addr - fixed_map[i].phys; - if (ofs > fixed_map[i].size) - continue; - - return fixed_map[i].mapped + ofs; - } - - if ((addr >= 0x18000000 && addr < 0x18c00000) || - (addr >= 0x70000000 && addr < 0x78000000) || - (addr >= 0x7c000000 && addr < 0x7c400000)) - return mt7915_reg_map_l1(dev, addr); - - return mt7915_reg_map_l2(dev, addr); -} - -static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - u32 addr = __mt7915_reg_addr(dev, offset); - - return dev->bus_ops->rr(mdev, addr); -} - -static void 
mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - u32 addr = __mt7915_reg_addr(dev, offset); - - dev->bus_ops->wr(mdev, addr, val); -} - -static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - u32 addr = __mt7915_reg_addr(dev, offset); - - return dev->bus_ops->rmw(mdev, addr, mask, val); -} - int mt7915_dma_init(struct mt7915_dev *dev) { /* Increase buffer size to receive large VHT/HE MPDUs */ - struct mt76_bus_ops *bus_ops; int rx_buf_size = MT_RX_BUF_SIZE * 2; u32 hif1_ofs = 0; int ret; - dev->bus_ops = dev->mt76.bus; - bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), - GFP_KERNEL); - if (!bus_ops) - return -ENOMEM; - - bus_ops->rr = mt7915_rr; - bus_ops->wr = mt7915_wr; - bus_ops->rmw = mt7915_rmw; - dev->mt76.bus = bus_ops; - mt76_dma_attach(&dev->mt76); if (dev->hif2) @@ -321,11 +213,11 @@ int mt7915_dma_init(struct mt7915_dev *dev) return ret; } - ret = mt76_init_queues(dev); + ret = mt76_init_queues(dev, mt76_dma_rx_poll); if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7915_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index 660398ac53c2..8ededf2e5279 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -8,12 +8,29 @@ static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset) { u8 *data = dev->mt76.eeprom.data; - if (data[offset] == 0xff) + if (data[offset] == 0xff && !dev->flash_mode) mt7915_mcu_get_eeprom(dev, offset); return data[offset]; } +static int mt7915_eeprom_load_precal(struct mt7915_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + u32 val; + + val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL); + if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP)) + return 0; + + val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE; + dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL); + if (!dev->cal) + return -ENOMEM; + + return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val); +} + static int mt7915_eeprom_load(struct mt7915_dev *dev) { int ret; @@ -22,12 +39,14 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev) if (ret < 0) return ret; - if (ret) + if (ret) { dev->flash_mode = true; - else + ret = mt7915_eeprom_load_precal(dev); + } else { memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE); + } - return 0; + return ret; } static int mt7915_check_eeprom(struct mt7915_dev *dev) @@ -124,7 +143,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, struct ieee80211_channel *chan, u8 chain_idx) { - int index; + int index, target_power; bool tssi_on; if (chain_idx > 3) @@ -133,131 +152,56 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, tssi_on = mt7915_tssi_enabled(dev, chan->band); if (chan->band == NL80211_BAND_2GHZ) { - index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on; - } else { - int group = tssi_on ? 
- mt7915_get_channel_group(chan->hw_value) : 8; - - index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group; - } - - return mt7915_eeprom_read(dev, index); -} + index = MT_EE_TX0_POWER_2G + chain_idx * 3; + target_power = mt7915_eeprom_read(dev, index); -static const u8 sku_cck_delta_map[] = { - SKU_CCK_GROUP0, - SKU_CCK_GROUP0, - SKU_CCK_GROUP1, - SKU_CCK_GROUP1, -}; + if (!tssi_on) + target_power += mt7915_eeprom_read(dev, index + 1); + } else { + int group = mt7915_get_channel_group(chan->hw_value); -static const u8 sku_ofdm_delta_map[] = { - SKU_OFDM_GROUP0, - SKU_OFDM_GROUP0, - SKU_OFDM_GROUP1, - SKU_OFDM_GROUP1, - SKU_OFDM_GROUP2, - SKU_OFDM_GROUP2, - SKU_OFDM_GROUP3, - SKU_OFDM_GROUP4, -}; + index = MT_EE_TX0_POWER_5G + chain_idx * 12; + target_power = mt7915_eeprom_read(dev, index + group); -static const u8 sku_mcs_delta_map[] = { - SKU_MCS_GROUP0, - SKU_MCS_GROUP1, - SKU_MCS_GROUP1, - SKU_MCS_GROUP2, - SKU_MCS_GROUP2, - SKU_MCS_GROUP3, - SKU_MCS_GROUP4, - SKU_MCS_GROUP5, - SKU_MCS_GROUP6, - SKU_MCS_GROUP7, - SKU_MCS_GROUP8, - SKU_MCS_GROUP9, -}; + if (!tssi_on) + target_power += mt7915_eeprom_read(dev, index + 8); + } -#define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \ - [_mode] = { \ - .len = _len, \ - .offset = { \ - _ofs_2g, \ - _ofs_5g, \ - }, \ - .delta_map = _map \ + return target_power; } -const struct sku_group mt7915_sku_groups[] = { - SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map), - SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map), - - SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map), - SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map), - SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map), - SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map), - SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map), - SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map), - - SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map), - SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map), - SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map), - SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map), - SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map), - SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map), - SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map), -}; - -static s8 -mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr) +s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band) { - u32 val = mt7915_eeprom_read(dev, addr); - s8 delta = FIELD_GET(SKU_DELTA_VAL, val); + u32 val; + s8 delta; - if (!(val & SKU_DELTA_EN)) - return 0; + if (band == NL80211_BAND_2GHZ) + val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G); + else + val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G); - return val & SKU_DELTA_ADD ? 
delta : -delta; -} + if (!(val & MT_EE_RATE_DELTA_EN)) + return 0; -static void -mt7915_eeprom_init_sku_band(struct mt7915_dev *dev, - struct ieee80211_supported_band *sband) -{ - int i, band = sband->band; - s8 *rate_power = dev->rate_power[band], max_delta = 0; - u8 idx = 0; - - for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) { - const struct sku_group *sku = &mt7915_sku_groups[i]; - u32 offset = sku->offset[band]; - int j; - - if (!offset) { - idx += sku->len; - continue; - } - - rate_power[idx++] = mt7915_get_sku_delta(dev, offset); - if (rate_power[idx - 1] > max_delta) - max_delta = rate_power[idx - 1]; - - if (i == SKU_HT_BW20 || i == SKU_VHT_BW20) - offset += 1; - - for (j = 1; j < sku->len; j++) { - u32 addr = offset + sku->delta_map[j]; - - rate_power[idx++] = mt7915_get_sku_delta(dev, addr); - if (rate_power[idx - 1] > max_delta) - max_delta = rate_power[idx - 1]; - } - } + delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val); - rate_power[idx] = max_delta; + return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta; } -void mt7915_eeprom_init_sku(struct mt7915_dev *dev) -{ - mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband); - mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband); -} +const u8 mt7915_sku_group_len[] = { + [SKU_CCK] = 4, + [SKU_OFDM] = 8, + [SKU_HT_BW20] = 8, + [SKU_HT_BW40] = 9, + [SKU_VHT_BW20] = 12, + [SKU_VHT_BW40] = 12, + [SKU_VHT_BW80] = 12, + [SKU_VHT_BW160] = 12, + [SKU_HE_RU26] = 12, + [SKU_HE_RU52] = 12, + [SKU_HE_RU106] = 12, + [SKU_HE_RU242] = 12, + [SKU_HE_RU484] = 12, + [SKU_HE_RU996] = 12, + [SKU_HE_RU2x996] = 12 +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h index 3ee8c27bb61b..033fb592bdf0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -17,14 +17,25 @@ enum mt7915_eeprom_field { MT_EE_MAC_ADDR = 0x004, MT_EE_MAC_ADDR2 = 0x00a, MT_EE_DDIE_FT_VERSION = 0x050, + MT_EE_DO_PRE_CAL = 0x062, MT_EE_WIFI_CONF = 0x190, + MT_EE_RATE_DELTA_2G = 0x252, + MT_EE_RATE_DELTA_5G = 0x29d, MT_EE_TX0_POWER_2G = 0x2fc, MT_EE_TX0_POWER_5G = 0x34b, MT_EE_ADIE_FT_VERSION = 0x9a0, - __MT_EE_MAX = 0xe00 + __MT_EE_MAX = 0xe00, + /* 0xe10 ~ 0x5780 used to save group cal data */ + MT_EE_PRECAL = 0xe10 }; +#define MT_EE_WIFI_CAL_GROUP BIT(0) +#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1) +#define MT_EE_CAL_UNIT 1024 +#define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT) +#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT) + #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0) #define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6) #define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6) @@ -34,6 +45,10 @@ enum mt7915_eeprom_field { #define MT_EE_WIFI_CONF7_TSSI0_5G BIT(2) #define MT_EE_WIFI_CONF7_TSSI1_5G BIT(4) +#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0) +#define MT_EE_RATE_DELTA_SIGN BIT(6) +#define MT_EE_RATE_DELTA_EN BIT(7) + enum mt7915_eeprom_band { MT_EE_BAND_SEL_DEFAULT, MT_EE_BAND_SEL_5GHZ, @@ -41,32 +56,6 @@ enum mt7915_eeprom_band { MT_EE_BAND_SEL_DUAL, }; -#define SKU_DELTA_VAL GENMASK(5, 0) -#define SKU_DELTA_ADD BIT(6) -#define SKU_DELTA_EN BIT(7) - -enum mt7915_sku_delta_group { - SKU_CCK_GROUP0, - SKU_CCK_GROUP1, - - SKU_OFDM_GROUP0 = 0, - SKU_OFDM_GROUP1, - SKU_OFDM_GROUP2, - SKU_OFDM_GROUP3, - SKU_OFDM_GROUP4, - - SKU_MCS_GROUP0 = 0, - SKU_MCS_GROUP1, - SKU_MCS_GROUP2, - SKU_MCS_GROUP3, - SKU_MCS_GROUP4, - SKU_MCS_GROUP5, - SKU_MCS_GROUP6, - SKU_MCS_GROUP7, - SKU_MCS_GROUP8, - SKU_MCS_GROUP9, -}; - enum mt7915_sku_rate_group { SKU_CCK, 
SKU_OFDM, @@ -86,12 +75,6 @@ enum mt7915_sku_rate_group { MAX_SKU_RATE_GROUP_NUM, }; -struct sku_group { - u8 len; - u16 offset[2]; - const u8 *delta_map; -}; - static inline int mt7915_get_channel_group(int channel) { @@ -124,6 +107,6 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band) return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G; } -extern const struct sku_group mt7915_sku_groups[]; +extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM]; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index ad4e5b95158b..822f3aa6bb8b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -4,6 +4,7 @@ #include <linux/etherdevice.h> #include "mt7915.h" #include "mac.h" +#include "mcu.h" #include "eeprom.h" #define CCK_RATE(_idx, _rate) { \ @@ -67,6 +68,39 @@ static const struct ieee80211_iface_combination if_comb[] = { }; static void +mt7915_init_txpower(struct mt7915_dev *dev, + struct ieee80211_supported_band *sband) +{ + int i, n_chains = hweight8(dev->mphy.antenna_mask); + int nss_delta = mt76_tx_power_nss_delta(n_chains); + int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band); + struct mt76_power_limits limits; + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *chan = &sband->channels[i]; + u32 target_power = 0; + int j; + + for (j = 0; j < n_chains; j++) { + u32 val; + + val = mt7915_eeprom_get_target_power(dev, chan, j); + target_power = max(target_power, val); + } + + target_power += pwr_delta; + target_power = mt76_get_rate_power_limits(&dev->mphy, chan, + &limits, + target_power); + target_power += nss_delta; + target_power = DIV_ROUND_UP(target_power, 2); + chan->max_power = min_t(int, chan->max_reg_power, + target_power); + chan->orig_mpwr = target_power; + } +} + +static void mt7915_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) { @@ -76,8 +110,12 @@ mt7915_regd_notifier(struct wiphy *wiphy, struct mt7915_phy *phy = mphy->priv; struct cfg80211_chan_def *chandef = &mphy->chandef; + memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); dev->mt76.region = request->dfs_region; + mt7915_init_txpower(dev, &mphy->sband_2g.sband); + mt7915_init_txpower(dev, &mphy->sband_5g.sband); + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) return; @@ -93,6 +131,10 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) hw->queues = 4; hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->netdev_features = NETIF_F_RXCSUM; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; phy->slottime = 9; @@ -108,9 +150,28 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); + ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, WANT_MONITOR_VIF); hw->max_tx_fragments = 4; + + if (phy->mt76->cap.has_2ghz) + phy->mt76->sband_2g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + + if (phy->mt76->cap.has_5ghz) { + phy->mt76->sband_5g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + phy->mt76->sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + } + + mt76_set_stream_caps(phy->mt76, true); + mt7915_set_stream_vht_txbf_caps(phy); + 
mt7915_set_stream_he_caps(phy); } static void @@ -153,16 +214,14 @@ static void mt7915_mac_init(struct mt7915_dev *dev) int i; mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); - /* disable hardware de-agg */ - mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); + /* enable hardware de-agg */ + mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); for (i = 0; i < MT7915_WTBL_SIZE; i++) mt7915_mac_wtbl_update(dev, i, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); for (i = 0; i < 2; i++) mt7915_mac_init_band(dev, i); - - mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b); } static int mt7915_txbf_init(struct mt7915_dev *dev) @@ -185,38 +244,6 @@ static int mt7915_txbf_init(struct mt7915_dev *dev) return mt7915_mcu_set_txbf_type(dev); } -static void -mt7915_init_txpower_band(struct mt7915_dev *dev, - struct ieee80211_supported_band *sband) -{ - int i, n_chains = hweight8(dev->mphy.antenna_mask); - - for (i = 0; i < sband->n_channels; i++) { - struct ieee80211_channel *chan = &sband->channels[i]; - u32 target_power = 0; - int j; - - for (j = 0; j < n_chains; j++) { - u32 val; - - val = mt7915_eeprom_get_target_power(dev, chan, j); - target_power = max(target_power, val); - } - - chan->max_power = min_t(int, chan->max_reg_power, - target_power / 2); - chan->orig_mpwr = target_power / 2; - } -} - -static void mt7915_init_txpower(struct mt7915_dev *dev) -{ - mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband); - mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband); - - mt7915_eeprom_init_sku(dev); -} - static int mt7915_register_ext_phy(struct mt7915_dev *dev) { struct mt7915_phy *phy = mt7915_ext_phy(dev); @@ -238,22 +265,17 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev) phy->mt76 = mphy; mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask; mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1; - mt7915_init_wiphy(mphy->hw); INIT_LIST_HEAD(&phy->stats_list); INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work); mt7915_eeprom_parse_band_config(phy); - mt7915_set_stream_vht_txbf_caps(phy); - mt7915_set_stream_he_caps(phy); + mt7915_init_wiphy(mphy->hw); memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2, ETH_ALEN); mt76_eeprom_override(mphy); - /* The second interface does not get any packets unless it has a vif */ - ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); - ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1, MT7915_TX_RING_SIZE); if (ret) @@ -278,9 +300,48 @@ static void mt7915_init_work(struct work_struct *work) mt7915_mcu_set_eeprom(dev); mt7915_mac_init(dev); - mt7915_init_txpower(dev); + mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband); + mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband); mt7915_txbf_init(dev); - mt7915_register_ext_phy(dev); +} + +static void mt7915_wfsys_reset(struct mt7915_dev *dev) +{ + u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON; + +#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0) +#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16) + + mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM); + + /* change to software control */ + val |= MT_TOP_PWR_SW_RST; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* reset wfsys */ + val &= ~MT_TOP_PWR_SW_RST; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* release wfsys then mcu re-executes romcode */ + val |= MT_TOP_PWR_SW_RST; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* switch to hw control */ + val &= ~MT_TOP_PWR_SW_RST; + val |= MT_TOP_PWR_HW_CTRL; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* check whether mcu resets to default */ + if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, 
MT_MCU_DUMMY_DEFAULT, + MT_MCU_DUMMY_DEFAULT, 1000)) { + dev_err(dev->mt76.dev, "wifi subsystem reset failure\n"); + return; + } + + /* wfsys reset won't clear host registers */ + mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE); + + msleep(100); } static int mt7915_init_hardware(struct mt7915_dev *dev) @@ -290,10 +351,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); INIT_WORK(&dev->init_work, mt7915_init_work); - spin_lock_init(&dev->token_lock); - idr_init(&dev->token); + dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)); - dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5)); + /* If MCU was already running, it is likely in a bad state */ + if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) > + FW_STATE_FW_DOWNLOAD) + mt7915_wfsys_reset(dev); ret = mt7915_dma_init(dev); if (ret) @@ -308,13 +371,26 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); ret = mt7915_mcu_init(dev); - if (ret) - return ret; + if (ret) { + /* Reset and try again */ + mt7915_wfsys_reset(dev); + + ret = mt7915_mcu_init(dev); + if (ret) + return ret; + } ret = mt7915_eeprom_init(dev); if (ret < 0) return ret; + + if (dev->flash_mode) { + ret = mt7915_mcu_apply_group_cal(dev); + if (ret) + return ret; + } + /* Beacon and mgmt frames should occupy wcid 0 */ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1); if (idx) @@ -330,8 +406,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy) { - int nss = hweight8(phy->mt76->chainmask); - u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap; + int nss; + u32 *cap; + + if (!phy->mt76->cap.has_5ghz) + return; + + nss = hweight8(phy->mt76->chainmask); + cap = &phy->mt76->sband_5g.sband.vht_cap.cap; *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | @@ -370,8 +452,8 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; elem->phy_cap_info[5] &= ~c; - c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB; + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB; elem->phy_cap_info[6] &= ~c; elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; @@ -408,8 +490,8 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, c = (nss - 1) | (max_t(int, le16_to_cpu(mcs->tx_mcs_160), 1) << 3); elem->phy_cap_info[5] |= c; - c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB; + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB; elem->phy_cap_info[6] |= c; /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */ @@ -476,9 +558,9 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, IEEE80211_HE_MAC_CAP0_HTC_HE; he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED; + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; he_cap_elem->mac_cap_info[4] = - IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU; + IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; if (band == NL80211_BAND_2GHZ) he_cap_elem->phy_cap_info[0] = @@ -535,7 +617,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; 
he_cap_elem->phy_cap_info[7] |= - IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; he_cap_elem->phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | @@ -635,25 +717,14 @@ int mt7915_register_device(struct mt7915_dev *dev) return ret; mt7915_init_wiphy(hw); - dev->mphy.sband_2g.sband.ht_cap.cap |= - IEEE80211_HT_CAP_LDPC_CODING | - IEEE80211_HT_CAP_MAX_AMSDU; - dev->mphy.sband_5g.sband.ht_cap.cap |= - IEEE80211_HT_CAP_LDPC_CODING | - IEEE80211_HT_CAP_MAX_AMSDU; - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + if (!dev->dbdc_support) dev->mphy.sband_5g.sband.vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; + dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; - - mt76_set_stream_caps(&dev->mphy, true); - mt7915_set_stream_vht_txbf_caps(&dev->phy); - mt7915_set_stream_he_caps(&dev->phy); dev->phy.dfs_state = -1; #ifdef CONFIG_NL80211_TESTMODE @@ -667,6 +738,10 @@ int mt7915_register_device(struct mt7915_dev *dev) ieee80211_queue_work(mt76_hw(dev), &dev->init_work); + ret = mt7915_register_ext_phy(dev); + if (ret) + return ret; + return mt7915_init_debugfs(dev); } @@ -675,9 +750,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev) mt7915_unregister_ext_phy(dev); mt76_unregister_device(&dev->mt76); mt7915_mcu_exit(dev); - mt7915_dma_cleanup(dev); - mt7915_tx_token_put(dev); + mt7915_dma_cleanup(dev); mt76_free_device(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index e5a258958ac9..7a9759fb79d8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -317,11 +317,18 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) __le32 *rxd = (__le32 *)skb->data; __le32 *rxv = NULL; u32 mode = 0; + u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); + u32 rxd4 = le32_to_cpu(rxd[4]); + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; bool unicast, insert_ccmp_hdr = false; - u8 remove_pad; + u8 remove_pad, amsdu_info; + bool hdr_trans; + u16 seq_ctrl = 0; + u8 qos_ctl = 0; + __le16 fc = 0; int i, idx; memset(status, 0, sizeof(*status)); @@ -338,8 +345,12 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) return -EINVAL; + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); + hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -362,6 +373,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; + if ((rxd0 & csum_mask) == csum_mask) + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -375,19 +389,6 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; } - if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes 
of an A-MPDU have the same timestamp */ - if (phy->rx_ampdu_ts != rxd[14]) { - if (!++phy->ampdu_ref) - phy->ampdu_ref++; - } - phy->rx_ampdu_ts = rxd[14]; - - status->ampdu_ref = phy->ampdu_ref; - } - remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) @@ -395,6 +396,13 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) rxd += 6; if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0)); + qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2); + seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2); + rxd += 4; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -419,6 +427,22 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) } if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -541,23 +565,47 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - if (insert_ccmp_hdr) { + amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; + if (!hdr_trans) { + memmove(skb->data + 2, skb->data, + ieee80211_get_hdrlen_from_skb(skb)); + skb_pull(skb, 2); + } + } + + if (insert_ccmp_hdr && !hdr_trans) { u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); mt76_insert_ccmp_hdr(skb, key_id); } + if (!hdr_trans) { + hdr = mt76_skb_get_hdr(skb); + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag &= ~(RX_FLAG_RADIOTAP_HE | + RX_FLAG_RADIOTAP_HE_MU); + status->flag |= RX_FLAG_8023; + } + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) mt7915_mac_decode_he_radiotap(skb, status, rxv, mode); - hdr = mt76_skb_get_hdr(skb); - if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; status->aggr = unicast && - !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; - status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + !ieee80211_is_qos_nullfunc(fc); + status->qos_ctl = qos_ctl; + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); return 0; } @@ -613,19 +661,18 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi, { #ifdef CONFIG_NL80211_TESTMODE struct mt76_testmode_data *td = &phy->mt76->test; + const struct ieee80211_rate *r; + u8 bw, mode, nss = td->tx_rate_nss; u8 rate_idx = td->tx_rate_idx; - u8 nss = td->tx_rate_nss; - u8 bw, mode; u16 rateval = 0; u32 val; + bool cck = false; + int band; if (skb != phy->mt76->test.tx_skb) return; switch (td->tx_rate_mode) { - case MT76_TM_TX_MODE_CCK: - mode = MT_PHY_TYPE_CCK; - break; case MT76_TM_TX_MODE_HT: nss = 1 + (rate_idx >> 3); mode = MT_PHY_TYPE_HT; @@ -645,7 +692,20 @@ mt7915_mac_write_txwi_tm(struct 
mt7915_phy *phy, __le32 *txwi, case MT76_TM_TX_MODE_HE_MU: mode = MT_PHY_TYPE_HE_MU; break; + case MT76_TM_TX_MODE_CCK: + cck = true; + fallthrough; case MT76_TM_TX_MODE_OFDM: + band = phy->mt76->chandef.chan->band; + if (band == NL80211_BAND_2GHZ && !cck) + rate_idx += 4; + + r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx]; + val = cck ? r->hw_value_short : r->hw_value; + + mode = val >> 8; + rate_idx = val & 0xff; + break; default: mode = MT_PHY_TYPE_OFDM; break; @@ -700,9 +760,10 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi, if (mode >= MT_PHY_TYPE_HE_SU) val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); - if (td->tx_rate_ldpc || bw > 0) + if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU)) val |= MT_TXD6_LDPC; + txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID); txwi[6] |= cpu_to_le32(val); txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, phy->test.spe_idx)); @@ -913,26 +974,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb); } -static void -mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked) -{ - struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2; - struct mt76_queue *q, *q2 = NULL; - - q = mphy->q_tx[0]; - if (blocked == q->blocked) - return; - - q->blocked = blocked; - if (mphy2) { - q2 = mphy2->q_tx[0]; - q2->blocked = blocked; - } - - if (!blocked) - mt76_worker_schedule(&dev->mt76.tx_worker); -} - int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, @@ -985,15 +1026,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); t->skb = tx_info->skb; - spin_lock_bh(&dev->token_lock); - id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC); - if (id >= 0) - dev->token_count++; - - if (dev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR) - mt7915_set_tx_blocked(dev, true); - spin_unlock_bh(&dev->token_lock); - + id = mt76_token_consume(mdev, &t); if (id < 0) return id; @@ -1091,7 +1124,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev, int i; txp = mt7915_txwi_to_txp(dev, t); - for (i = 1; i < txp->nbuf; i++) + for (i = 0; i < txp->nbuf; i++) dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); } @@ -1157,15 +1190,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); stat = FIELD_GET(MT_TX_FREE_STATUS, info); - spin_lock_bh(&dev->token_lock); - txwi = idr_remove(&dev->token, msdu); - if (txwi) - dev->token_count--; - if (dev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR && - dev->mphy.q_tx[0]->blocked) - wake = true; - spin_unlock_bh(&dev->token_lock); - + txwi = mt76_token_release(mdev, msdu, &wake); if (!txwi) continue; @@ -1195,11 +1220,8 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) mt7915_mac_sta_poll(dev); - if (wake) { - spin_lock_bh(&dev->token_lock); - mt7915_set_tx_blocked(dev, false); - spin_unlock_bh(&dev->token_lock); - } + if (wake) + mt76_set_tx_blocked(&dev->mt76, false); mt76_worker_schedule(&dev->mt76.tx_worker); @@ -1228,10 +1250,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) struct mt7915_txp *txp; txp = mt7915_txwi_to_txp(mdev, e->txwi); - - spin_lock_bh(&dev->token_lock); - t = idr_remove(&dev->token, le16_to_cpu(txp->token)); - spin_unlock_bh(&dev->token_lock); + t = mt76_token_put(mdev, 
le16_to_cpu(txp->token)); e->skb = t ? t->skb : NULL; } @@ -1252,8 +1271,8 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy) bool ext_phy = phy != &dev->phy; u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy); - mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); - mt7915_l2_set(dev, reg, BIT(11) | BIT(9)); + mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); + mt76_set(dev, reg, BIT(11) | BIT(9)); } void mt7915_mac_reset_counters(struct mt7915_phy *phy) @@ -1346,12 +1365,12 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy) void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy) { - mt7915_l2_set(dev, MT_WF_PHY_RXTD12(ext_phy), - MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | - MT_WF_PHY_RXTD12_IRPI_SW_CLR); + mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy), + MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | + MT_WF_PHY_RXTD12_IRPI_SW_CLR); - mt7915_l2_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy), - FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); + mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy), + FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); } static u8 @@ -1366,7 +1385,7 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support)); for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { - val = mt7915_l2_rr(dev, reg); + val = mt76_rr(dev, reg); sum += val * nf_power[i]; n += val; } @@ -1470,9 +1489,8 @@ mt7915_update_beacons(struct mt7915_dev *dev) } static void -mt7915_dma_reset(struct mt7915_phy *phy) +mt7915_dma_reset(struct mt7915_dev *dev) { - struct mt7915_dev *dev = phy->dev; struct mt76_phy *mphy_ext = dev->mt76.phy2; u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE; int i; @@ -1489,18 +1507,22 @@ mt7915_dma_reset(struct mt7915_phy *phy) (MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN)); } + usleep_range(1000, 2000); - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true); for (i = 0; i < __MT_TXQ_MAX; i++) { - mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); if (mphy_ext) mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true); } - mt76_for_each_q_rx(&dev->mt76, i) { + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) mt76_queue_rx_reset(dev, i); - } + + mt76_tx_status_check(&dev->mt76, NULL, true); /* re-init prefetch settings after reset */ mt7915_dma_prefetch(dev); @@ -1524,8 +1546,8 @@ void mt7915_tx_token_put(struct mt7915_dev *dev) struct mt76_txwi_cache *txwi; int id; - spin_lock_bh(&dev->token_lock); - idr_for_each_entry(&dev->token, txwi, id) { + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) { mt7915_txp_skb_unmap(&dev->mt76, txwi); if (txwi->skb) { struct ieee80211_hw *hw; @@ -1534,10 +1556,10 @@ void mt7915_tx_token_put(struct mt7915_dev *dev) ieee80211_free_txskb(hw, txwi->skb); } mt76_put_txwi(&dev->mt76, txwi); - dev->token_count--; + dev->mt76.token_count--; } - spin_unlock_bh(&dev->token_lock); - idr_destroy(&dev->token); + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); } /* system error recovery */ @@ -1562,9 +1584,10 @@ void mt7915_mac_reset_work(struct work_struct *work) set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); cancel_delayed_work_sync(&dev->mphy.mac_work); - if (phy2) + if (phy2) { + set_bit(MT76_RESET, &phy2->mt76->state); cancel_delayed_work_sync(&phy2->mt76->mac_work); - + } /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); if (ext_phy) @@ -1580,11 
+1603,11 @@ void mt7915_mac_reset_work(struct work_struct *work) mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); - mt7915_tx_token_put(dev); - idr_init(&dev->token); - if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7915_dma_reset(&dev->phy); + mt7915_dma_reset(dev); + + mt7915_tx_token_put(dev); + idr_init(&dev->mt76.token); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); @@ -1592,6 +1615,8 @@ void mt7915_mac_reset_work(struct work_struct *work) clear_bit(MT76_MCU_RESET, &dev->mphy.state); clear_bit(MT76_RESET, &dev->mphy.state); + if (phy2) + clear_bit(MT76_RESET, &phy2->mt76->state); mt76_worker_enable(&dev->mt76.tx_worker); napi_enable(&dev->mt76.tx_napi); @@ -1633,39 +1658,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy) bool ext_phy = phy != &dev->phy; int i, aggr0, aggr1; - memset(mib, 0, sizeof(*mib)); - - mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), - MT_MIB_SDR3_FCS_ERR_MASK); + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), + MT_MIB_SDR3_FCS_ERR_MASK); aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { - u32 val, val2; + u32 val; val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); - - val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); - if (val2 > mib->ack_fail_cnt) - mib->ack_fail_cnt = val2; - - val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); - if (val2 > mib->ba_miss_cnt) - mib->ba_miss_cnt = val2; + mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + mib->ack_fail_cnt += + FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); - val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); - if (val2 > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); - mib->rts_retries_cnt = val2; - } + mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt += + FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); - val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); - dev->mt76.aggr_stats[aggr0++] += val & 0xffff; dev->mt76.aggr_stats[aggr0++] += val >> 16; - dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff; - dev->mt76.aggr_stats[aggr1++] += val2 >> 16; + + val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); + dev->mt76.aggr_stats[aggr1++] += val & 0xffff; + dev->mt76.aggr_stats[aggr1++] += val >> 16; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index 96ff3fb0d1f3..0f929fb53027 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -86,6 +86,10 @@ enum rx_pkt_type { /* RXD DW4 */ #define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0) +#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD4_MID_AMSDU_FRAME BIT(1) +#define MT_RXD4_LAST_AMSDU_FRAME BIT(0) + #define MT_RXD4_NORMAL_PATTERN_DROP BIT(9) #define MT_RXD4_NORMAL_CLS BIT(10) #define MT_RXD4_NORMAL_OFLD GENMASK(12, 11) @@ -97,6 +101,17 @@ enum rx_pkt_type { #define MT_RXV_HDR_BAND_IDX BIT(24) +/* RXD GROUP4 */ +#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0) +#define MT_RXD6_TA_LO GENMASK(31, 16) + +#define MT_RXD7_TA_HI GENMASK(31, 0) + +#define MT_RXD8_SEQ_CTRL GENMASK(15, 0) +#define MT_RXD8_QOS_CTL GENMASK(31, 16) + +#define MT_RXD9_HT_CONTROL GENMASK(31, 0) + /* P-RXV */ #define MT_PRXV_TX_RATE GENMASK(6, 0) #define MT_PRXV_TX_DCM BIT(4) diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index d4969b2e1ffb..e5bd687546b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -25,6 +25,7 @@ static int mt7915_start(struct ieee80211_hw *hw) struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); bool running; + int ret; flush_work(&dev->init_work); @@ -33,21 +34,48 @@ static int mt7915_start(struct ieee80211_hw *hw) running = mt7915_dev_running(dev); if (!running) { - mt7915_mcu_set_pm(dev, 0, 0); - mt7915_mcu_set_mac(dev, 0, true, false); - mt7915_mcu_set_scs(dev, 0, true); + ret = mt7915_mcu_set_pm(dev, 0, 0); + if (ret) + goto out; + + ret = mt7915_mcu_set_mac(dev, 0, true, true); + if (ret) + goto out; + + ret = mt7915_mcu_set_scs(dev, 0, true); + if (ret) + goto out; + mt7915_mac_enable_nf(dev, 0); } if (phy != &dev->phy) { - mt7915_mcu_set_pm(dev, 1, 0); - mt7915_mcu_set_mac(dev, 1, true, false); - mt7915_mcu_set_scs(dev, 1, true); + ret = mt7915_mcu_set_pm(dev, 1, 0); + if (ret) + goto out; + + ret = mt7915_mcu_set_mac(dev, 1, true, true); + if (ret) + goto out; + + ret = mt7915_mcu_set_scs(dev, 1, true); + if (ret) + goto out; + mt7915_mac_enable_nf(dev, 1); } - mt7915_mcu_set_sku_en(phy, true); - mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); + ret = mt7915_mcu_set_rts_thresh(phy, 0x92b); + if (ret) + goto out; + + ret = mt7915_mcu_set_sku_en(phy, true); + if (ret) + goto out; + + ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); + if (ret) + goto out; set_bit(MT76_STATE_RUNNING, &phy->mt76->state); @@ -58,9 +86,10 @@ static int mt7915_start(struct ieee80211_hw *hw) if (!running) mt7915_mac_reset_counters(phy); +out: mutex_unlock(&dev->mt76.mutex); - return 0; + return ret; } static void mt7915_stop(struct ieee80211_hw *hw) @@ -227,7 +256,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, struct mt7915_phy *phy = mt7915_hw_phy(hw); int idx = msta->wcid.idx; - /* TODO: disable beacon for the bss */ + mt7915_mcu_add_bss_info(phy, vif, false); + mt7915_mcu_add_sta(dev, vif, NULL, false); mutex_lock(&dev->mt76.mutex); mt76_testmode_reset(phy->mt76, true); @@ -283,6 +313,12 @@ int mt7915_set_channel(struct mt7915_phy *phy) mt7915_init_dfs_state(phy); mt76_set_channel(phy->mt76); + if (dev->flash_mode) { + ret = mt7915_mcu_apply_tx_dpd(phy); + if (ret) + goto out; + } + ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH)); if (ret) goto out; @@ -317,7 +353,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; + u8 *wcid_keyidx = &wcid->hw_key_idx; int idx = key->keyidx; + int err = 0; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. 
@@ -332,6 +370,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: + wcid_keyidx = &wcid->hw_key_idx2; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; break; case WLAN_CIPHER_SUITE_TKIP: @@ -347,16 +386,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - } + mutex_lock(&dev->mt76.mutex); + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); - return mt7915_mcu_add_key(dev, vif, msta, key, cmd); + err = mt7915_mcu_add_key(dev, vif, msta, key, cmd); + +out: + mutex_unlock(&dev->mt76.mutex); + + return err; } static int mt7915_config(struct ieee80211_hw *hw, u32 changed) @@ -382,7 +429,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - ret = mt7915_mcu_set_sku(phy); + ret = mt7915_mcu_set_txpower_sku(phy); if (ret) return ret; } @@ -515,9 +562,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_BEACON_ENABLED) { - mt7915_mcu_add_bss_info(phy, vif, info->enable_beacon); - mt7915_mcu_add_sta(dev, vif, NULL, info->enable_beacon); + if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { + mt7915_mcu_add_bss_info(phy, vif, true); + mt7915_mcu_add_sta(dev, vif, NULL, true); } /* ensure that enable txcmd_mode after bss_info */ @@ -631,12 +678,13 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); + int ret; mutex_lock(&dev->mt76.mutex); - mt7915_mcu_set_rts_thresh(phy, val); + ret = mt7915_mcu_set_rts_thresh(phy, val); mutex_unlock(&dev->mt76.mutex); - return 0; + return ret; } static int @@ -663,22 +711,22 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, params->buf_size); - mt7915_mcu_add_rx_ba(dev, params, true); + ret = mt7915_mcu_add_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - mt7915_mcu_add_rx_ba(dev, params, false); + ret = mt7915_mcu_add_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7915_mcu_add_tx_ba(dev, params, true); + ret = mt7915_mcu_add_tx_ba(dev, params, true); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; clear_bit(tid, &msta->ampdu_state); - mt7915_mcu_add_tx_ba(dev, params, false); + ret = mt7915_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: set_bit(tid, &msta->ampdu_state); @@ -687,7 +735,7 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; clear_bit(tid, &msta->ampdu_state); - mt7915_mcu_add_tx_ba(dev, params, false); + ret = mt7915_mcu_add_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } @@ -717,13 +765,19 @@ mt7915_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct mt7915_phy *phy = mt7915_hw_phy(hw); + 
struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mib_stats *mib = &phy->mib; + mutex_lock(&dev->mt76.mutex); stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; + memset(mib, 0, sizeof(*mib)); + + mutex_unlock(&dev->mt76.mutex); + return 0; } @@ -833,9 +887,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct mt7915_sta_stats *stats = &msta->stats; + struct rate_info rxrate = {}; - if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0) + if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) { + sinfo->rxrate = rxrate; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + } if (!stats->tx_rate.legacy && !stats->tx_rate.flags) return; @@ -888,6 +945,22 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw, mt7915_mcu_sta_update_hdr_trans(dev, vif, sta); } +static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7915_mcu_sta_update_hdr_trans(dev, vif, sta); +} + const struct ieee80211_ops mt7915_ops = { .tx = mt7915_tx, .start = mt7915_start, @@ -920,6 +993,7 @@ const struct ieee80211_ops mt7915_ops = { .set_coverage_class = mt7915_set_coverage_class, .sta_statistics = mt7915_sta_statistics, .sta_set_4addr = mt7915_sta_set_4addr, + .sta_set_decap_offload = mt7915_sta_set_decap_offload, CFG80211_TESTMODE_CMD(mt76_testmode_cmd) CFG80211_TESTMODE_DUMP(mt76_testmode_dump) #ifdef CONFIG_MAC80211_DEBUGFS diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 195929242b72..b3f14ff67c5a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -147,9 +147,10 @@ mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif) } static u8 -mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) +mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { + enum nl80211_band band = mphy->chandef.chan->band; struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; const struct ieee80211_sta_he_cap *he_cap; @@ -161,12 +162,8 @@ mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif, he_cap = &sta->he_cap; } else { struct ieee80211_supported_band *sband; - struct mt7915_phy *phy; - struct mt7915_vif *mvif; - mvif = (struct mt7915_vif *)vif->drv_priv; - phy = mvif->band_idx ? 
mt7915_ext_phy(dev) : &dev->phy; - sband = phy->mt76->hw->wiphy->bands[band]; + sband = mphy->hw->wiphy->bands[band]; ht_cap = &sband->ht_cap; vht_cap = &sband->vht_cap; @@ -220,7 +217,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd, int ret = 0; if (!skb) { - dev_err(mdev->dev, "Message %d (seq %d) timeout\n", + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", cmd, seq); return -ETIMEDOUT; } @@ -337,6 +334,22 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) } static void +mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7915_mcu_csa_notify *c; + + c = (struct mt7915_mcu_csa_notify *)skb->data; + + if (c->band_idx && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7915_mcu_csa_finish, mphy->hw); +} + +static void mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; @@ -344,61 +357,69 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) r = (struct mt7915_mcu_rdd_report *)skb->data; - if (r->idx && dev->mt76.phy2) + if (r->band_idx && dev->mt76.phy2) mphy = dev->mt76.phy2; ieee80211_radar_detected(mphy->hw); dev->hw_pattern++; } -static void +static int mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, struct rate_info *rate, u16 r) { struct ieee80211_supported_band *sband; u16 ru_idx = le16_to_cpu(ra->ru_idx); - u16 flags = 0; + bool cck = false; rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r); rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1; switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) { case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; case MT_PHY_TYPE_OFDM: if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) sband = &mphy->sband_5g.sband; else sband = &mphy->sband_2g.sband; + rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck); rate->legacy = sband->bitrates[rate->mcs].bitrate; break; case MT_PHY_TYPE_HT: case MT_PHY_TYPE_HT_GF: rate->mcs += (rate->nss - 1) * 8; - flags |= RATE_INFO_FLAGS_MCS; + if (rate->mcs > 31) + return -EINVAL; + rate->flags = RATE_INFO_FLAGS_MCS; if (ra->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_VHT: - flags |= RATE_INFO_FLAGS_VHT_MCS; + if (rate->mcs > 9) + return -EINVAL; + rate->flags = RATE_INFO_FLAGS_VHT_MCS; if (ra->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_HE_SU: case MT_PHY_TYPE_HE_EXT_SU: case MT_PHY_TYPE_HE_TB: case MT_PHY_TYPE_HE_MU: + if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) + return -EINVAL; + rate->he_gi = ra->gi; rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r); - - flags |= RATE_INFO_FLAGS_HE_MCS; + rate->flags = RATE_INFO_FLAGS_HE_MCS; break; default: - break; + return -EINVAL; } - rate->flags = flags; if (ru_idx) { switch (ru_idx) { @@ -435,6 +456,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, break; } } + + return 0; } static void @@ -465,12 +488,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) mphy = dev->mt76.phy2; /* current rate */ - mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr); - stats->tx_rate = rate; + if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr)) + stats->tx_rate = rate; /* probing rate */ - mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe); - stats->prob_rate = prob_rate; + if 
(!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe)) + stats->prob_rate = prob_rate; if (attempts) { u16 success = le16_to_cpu(ra->success); @@ -498,7 +521,8 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) break; } - wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data); + wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, + (int)(skb->len - sizeof(*rxd)), data); } static void @@ -511,9 +535,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb) mt7915_mcu_rx_radar_detected(dev, skb); break; case MCU_EXT_EVENT_CSA_NOTIFY: - ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7915_mcu_csa_finish, dev); + mt7915_mcu_rx_csa_notify(dev, skb); break; case MCU_EXT_EVENT_RATE_REPORT: mt7915_mcu_tx_rate_report(dev, skb); @@ -592,7 +614,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta, if (!nskb) { nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL, - MT7915_WTBL_UPDATE_BA_SIZE); + MT7915_WTBL_UPDATE_MAX_SIZE); if (!nskb) return ERR_PTR(-ENOMEM); @@ -662,8 +684,6 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct mt7915_phy *phy, bool enable) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; - enum nl80211_band band = chandef->chan->band; struct bss_info_basic *bss; u16 wlan_idx = mvif->sta.wcid.idx; u32 type = NETWORK_INFRA; @@ -713,7 +733,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN); bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); bss->dtim_period = vif->bss_conf.dtim_period; - bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL); + bss->phy_mode = mt7915_get_phy_mode(phy->mt76, vif, NULL); } else { memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN); } @@ -989,8 +1009,10 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct sk_buff *skb; - if (mvif->omac_idx >= REPEATER_BSSID_START) + if (mvif->omac_idx >= REPEATER_BSSID_START) { + mt7915_mcu_muar_config(phy, vif, false, enable); mt7915_mcu_muar_config(phy, vif, true, enable); + } skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL, MT7915_BSS_UPDATE_MAX_SIZE); @@ -1188,6 +1210,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, @@ -1330,7 +1355,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) cap |= STA_REC_HE_CAP_OM; - if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU) + if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU) cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU; if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) @@ -1685,6 +1710,7 @@ mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, return; msta = (struct mt7915_sta *)sta->drv_priv; + htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) { htr->to_ds = true; htr->from_ds = true; @@ -1704,6 +1730,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, return -ENOMEM; wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb); + if 
(IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE), @@ -1728,6 +1757,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); return mt76_mcu_skb_send_msg(&dev->mt76, skb, @@ -1821,9 +1853,9 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, bf->tx_mode = MT_PHY_TYPE_HE_SU; mt7915_mcu_sta_sounding_rate(bf); - bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMER_FB, + bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB, pe->phy_cap_info[6]); - bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMER_FB, + bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB, pe->phy_cap_info[6]); bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, ve->phy_cap_info[5]); @@ -2045,25 +2077,30 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif, static void mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct cfg80211_chan_def *chandef = &dev->mphy.chandef; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt76_phy *mphy = &dev->mphy; + enum nl80211_band band; struct sta_rec_ra *ra; struct tlv *tlv; - enum nl80211_band band = chandef->chan->band; - u32 supp_rate = sta->supp_rates[band]; - int n_rates = hweight32(supp_rate); - u32 cap = sta->wme ? STA_CAP_WMM : 0; + u32 supp_rate, n_rates, cap = sta->wme ? STA_CAP_WMM : 0; u8 i, nss = sta->rx_nss, mcs = 0; tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); - ra = (struct sta_rec_ra *)tlv; + + if (msta->wcid.ext_phy && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + band = mphy->chandef.chan->band; + supp_rate = sta->supp_rates[band]; + n_rates = hweight32(supp_rate); + ra->valid = true; ra->auto_rate = true; - ra->phy_mode = mt7915_get_phy_mode(dev, vif, band, sta); - ra->channel = chandef->chan->hw_value; + ra->phy_mode = mt7915_get_phy_mode(mphy, vif, sta); + ra->channel = mphy->chandef.chan->hw_value; ra->bw = sta->bandwidth; ra->rate_len = n_rates; ra->phy.bw = sta->bandwidth; @@ -2253,6 +2290,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + if (enable) { mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); @@ -2411,6 +2451,17 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct bss_info_bcn *bcn; int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE; + rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); + if (IS_ERR(rskb)) + return PTR_ERR(rskb); + + tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); + bcn = (struct bss_info_bcn *)tlv; + bcn->enable = en; + + if (!en) + goto out; + skb = ieee80211_beacon_get_template(hw, vif, &offs); if (!skb) return -EINVAL; @@ -2421,16 +2472,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, return -EINVAL; } - rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); - if (IS_ERR(rskb)) { - dev_kfree_skb(skb); - return PTR_ERR(rskb); - } - - tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, 
sizeof(*bcn)); - bcn = (struct bss_info_bcn *)tlv; - bcn->enable = en; - if (mvif->band_idx) { info = IEEE80211_SKB_CB(skb); info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; @@ -2441,6 +2482,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs); dev_kfree_skb(skb); +out: return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_EXT_CMD(BSS_INFO_UPDATE), true); } @@ -2500,11 +2542,9 @@ static int mt7915_mcu_start_patch(struct mt7915_dev *dev) static int mt7915_driver_own(struct mt7915_dev *dev) { - u32 reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); - - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN); - if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN, - 0, 500)) { + mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN); + if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0, + MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) { dev_err(dev->mt76.dev, "Timeout for driver own\n"); return -EIO; } @@ -2743,20 +2783,6 @@ out: static int mt7915_load_firmware(struct mt7915_dev *dev) { int ret; - u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC); - - val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD); - - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) { - /* restart firmware once */ - __mt76_mcu_restart(&dev->mt76); - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, - val, 1000)) { - dev_err(dev->mt76.dev, - "Firmware is not ready for download\n"); - return -EIO; - } - } ret = mt7915_load_patch(dev); if (ret) @@ -2766,7 +2792,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev) if (ret) return ret; - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_WACPU_RDY), 1000)) { dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); @@ -2854,21 +2880,39 @@ int mt7915_mcu_init(struct mt7915_dev *dev) void mt7915_mcu_exit(struct mt7915_dev *dev) { - u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC); - __mt76_mcu_restart(&dev->mt76); - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD), 1000)) { dev_err(dev->mt76.dev, "Failed to exit mcu\n"); return; } - reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN); + mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN); skb_queue_purge(&dev->mt76.mcu.res_q); } +static int +mt7915_mcu_set_rx_hdr_trans_blacklist(struct mt7915_dev *dev, int band) +{ + struct { + u8 operation; + u8 count; + u8 _rsv[2]; + u8 index; + u8 enable; + __le16 etype; + } req = { + .operation = 1, + .count = 1, + .enable = 1, + .etype = cpu_to_le16(ETH_P_PAE), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS), + &req, sizeof(req), false); +} + int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, bool hdr_trans) { @@ -2899,6 +2943,9 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, if (ret) return ret; + if (hdr_trans) + mt7915_mcu_set_rx_hdr_trans_blacklist(dev, band); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL), &req_mac, sizeof(req_mac), true); } @@ -3182,7 +3229,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) } #endif - if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && 
chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) @@ -3280,6 +3327,148 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) return 0; } +static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx, + u8 *data, u32 len, int cmd) +{ + struct { + u8 dir; + u8 valid; + __le16 bitmap; + s8 precal; + u8 action; + u8 band; + u8 idx; + u8 rsv[4]; + __le32 len; + } req; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len); + if (!skb) + return -ENOMEM; + + req.idx = idx; + req.len = cpu_to_le32(len); + skb_put_data(skb, &req, sizeof(req)); + skb_put_data(skb, data, len); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false); +} + +int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev) +{ + u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data; + u32 total = MT_EE_CAL_GROUP_SIZE; + + if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP)) + return 0; + + /* + * Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG + * Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC + */ + while (total > 0) { + int ret, len; + + len = min_t(u32, total, MT_EE_CAL_UNIT); + + ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len, + MCU_EXT_CMD(GROUP_PRE_CAL_INFO)); + if (ret) + return ret; + + total -= len; + cal += len; + idx++; + } + + return 0; +} + +static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur) +{ + int i; + + for (i = 0; i < n_freqs; i++) + if (cur == freqs[i]) + return i; + + return -1; +} + +static int mt7915_dpd_freq_idx(u16 freq, u8 bw) +{ + static const u16 freq_list[] = { + 5180, 5200, 5220, 5240, + 5260, 5280, 5300, 5320, + 5500, 5520, 5540, 5560, + 5580, 5600, 5620, 5640, + 5660, 5680, 5700, 5745, + 5765, 5785, 5805, 5825 + }; + int offset_2g = ARRAY_SIZE(freq_list); + int idx; + + if (freq < 4000) { + if (freq < 2432) + return offset_2g; + if (freq < 2457) + return offset_2g + 1; + + return offset_2g + 2; + } + + if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160) + return -1; + + if (bw != NL80211_CHAN_WIDTH_20) { + idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), + freq + 10); + if (idx >= 0) + return idx; + + idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), + freq - 10); + if (idx >= 0) + return idx; + } + + return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); +} + +int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + u16 total = 2, idx, center_freq = chandef->center_freq1; + u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data; + + if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD)) + return 0; + + idx = mt7915_dpd_freq_idx(center_freq, chandef->width); + if (idx < 0) + return -EINVAL; + + /* Items: Tx DPD, Tx Flatness */ + idx = idx * 2; + cal += MT_EE_CAL_GROUP_SIZE; + + while (total--) { + int ret; + + cal += (idx * MT_EE_CAL_UNIT); + ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT, + MCU_EXT_CMD(DPD_PRE_CAL_INFO)); + if (ret) + return ret; + + idx++; + } + + return 0; +} + int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index) { struct { @@ -3314,8 +3503,9 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx) sizeof(req), false); } -int mt7915_mcu_set_sku(struct mt7915_phy *phy) +int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) { +#define MT7915_SKU_RATE_NUM 161 struct mt7915_dev *dev = phy->dev; struct mt76_phy *mphy = phy->mt76; struct ieee80211_hw *hw = mphy->hw; @@ -3328,15 +3518,37 @@ int mt7915_mcu_set_sku(struct 
mt7915_phy *phy) .format_id = 4, .dbdc_idx = phy != &dev->phy, }; - int i; - s8 *delta; + struct mt76_power_limits limits_array; + s8 *la = (s8 *)&limits_array; + int i, idx, n_chains = hweight8(mphy->antenna_mask); + int tx_power; - delta = dev->rate_power[mphy->chandef.chan->band]; - mphy->txpower_cur = hw->conf.power_level * 2 + - delta[MT7915_SKU_MAX_DELTA_IDX]; + tx_power = hw->conf.power_level * 2 - + mt76_tx_power_nss_delta(n_chains); - for (i = 0; i < MT7915_SKU_RATE_NUM; i++) - req.val[i] = hw->conf.power_level * 2 + delta[i]; + tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, + &limits_array, tx_power); + mphy->txpower_cur = tx_power; + + for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { + u8 mcs_num, len = mt7915_sku_group_len[i]; + int j; + + if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) { + mcs_num = 10; + + if (i == SKU_HT_BW20 || i == SKU_VHT_BW20) + la = (s8 *)&limits_array + 12; + } else { + mcs_num = len; + } + + for (j = 0; j < min_t(u8, mcs_num, len); j++) + req.val[idx + j] = la[j]; + + la += mcs_num; + idx += len; + } return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, @@ -3501,9 +3713,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_supported_band *sband; struct mt7915_mcu_phy_rx_info *res; struct sk_buff *skb; - u16 flags = 0; int ret; - int i; + bool cck = false; ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO), &req, sizeof(req), true, &skb); @@ -3517,48 +3728,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, switch (res->mode) { case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; case MT_PHY_TYPE_OFDM: if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) sband = &mphy->sband_5g.sband; else sband = &mphy->sband_2g.sband; - for (i = 0; i < sband->n_bitrates; i++) { - if (rate->mcs != (sband->bitrates[i].hw_value & 0xf)) - continue; - - rate->legacy = sband->bitrates[i].bitrate; - break; - } + rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck); + rate->legacy = sband->bitrates[rate->mcs].bitrate; break; case MT_PHY_TYPE_HT: case MT_PHY_TYPE_HT_GF: - if (rate->mcs > 31) - return -EINVAL; - - flags |= RATE_INFO_FLAGS_MCS; + if (rate->mcs > 31) { + ret = -EINVAL; + goto out; + } + rate->flags = RATE_INFO_FLAGS_MCS; if (res->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_VHT: - flags |= RATE_INFO_FLAGS_VHT_MCS; + if (rate->mcs > 9) { + ret = -EINVAL; + goto out; + } + rate->flags = RATE_INFO_FLAGS_VHT_MCS; if (res->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_HE_SU: case MT_PHY_TYPE_HE_EXT_SU: case MT_PHY_TYPE_HE_TB: case MT_PHY_TYPE_HE_MU: + if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) { + ret = -EINVAL; + goto out; + } rate->he_gi = res->gi; - - flags |= RATE_INFO_FLAGS_HE_MCS; + rate->flags = RATE_INFO_FLAGS_HE_MCS; break; default: - break; + ret = -EINVAL; + goto out; } - rate->flags = flags; switch (res->bw) { case IEEE80211_STA_RX_BW_160: @@ -3575,7 +3791,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, break; } +out: dev_kfree_skb(skb); - return 0; + return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 2d584142c27b..42582a66e42d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ 
-68,10 +68,19 @@ struct mt7915_mcu_rxd { u8 s2d_index; }; +struct mt7915_mcu_csa_notify { + struct mt7915_mcu_rxd rxd; + + u8 omac_idx; + u8 csa_count; + u8 band_idx; + u8 rsv; +} __packed; + struct mt7915_mcu_rdd_report { struct mt7915_mcu_rxd rxd; - u8 idx; + u8 band_idx; u8 long_detected; u8 constant_prf_detected; u8 staggered_prf_detected; @@ -275,6 +284,8 @@ enum { MCU_EXT_CMD_FW_DBG_CTRL = 0x95, MCU_EXT_CMD_SET_RDD_TH = 0x9d, MCU_EXT_CMD_SET_SPR = 0xa8, + MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab, + MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac, MCU_EXT_CMD_PHY_STAT_INFO = 0xad, }; @@ -1080,9 +1091,6 @@ enum { sizeof(struct tlv) + \ MT7915_WTBL_UPDATE_MAX_SIZE) -#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct bss_info_omac) + \ sizeof(struct bss_info_basic) +\ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c new file mode 100644 index 000000000000..af712a936ef6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + +#include "mt7915.h" + +static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); + + mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base); + /* use read to push write */ + mt76_rr(dev, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1 + offset; +} + +static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); + + mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base); + /* use read to push write */ + mt76_rr(dev, MT_HIF_REMAP_L2); + + return MT_HIF_REMAP_BASE_L2 + offset; +} + +static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) +{ + static const struct { + u32 phys; + u32 mapped; + u32 size; + } fixed_map[] = { + { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ + { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ + { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ + { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */ + { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */ + { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */ + { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */ + { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */ + { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */ + { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */ + { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */ + { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */ + { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */ + { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ + { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ + { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ + { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ + { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ + { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ + { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ + { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ + { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ + { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) 
*/ + { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ + { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ + { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ + { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ + { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ + { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ + { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ + { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ + { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ + { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ + { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ + { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ + { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ + { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ + { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ + }; + int i; + + if (addr < 0x100000) + return addr; + + for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { + u32 ofs; + + if (addr < fixed_map[i].phys) + continue; + + ofs = addr - fixed_map[i].phys; + if (ofs > fixed_map[i].size) + continue; + + return fixed_map[i].mapped + ofs; + } + + if ((addr >= 0x18000000 && addr < 0x18c00000) || + (addr >= 0x70000000 && addr < 0x78000000) || + (addr >= 0x7c000000 && addr < 0x7c400000)) + return mt7915_reg_map_l1(dev, addr); + + return mt7915_reg_map_l2(dev, addr); +} + +static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + u32 addr = __mt7915_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + u32 addr = __mt7915_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + u32 addr = __mt7915_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq) +{ + struct mt76_bus_ops *bus_ops; + struct mt7915_dev *dev; + + dev = container_of(mdev, struct mt7915_dev, mt76); + mt76_mmio_init(&dev->mt76, mem_base); + + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) + return -ENOMEM; + + bus_ops->rr = mt7915_rr; + bus_ops->wr = mt7915_wr; + bus_ops->rmw = mt7915_rmw; + dev->mt76.bus = bus_ops; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + mt76_wr(dev, MT_INT_MASK_CSR, 0); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 5c7eefdf2013..4ea8972d4e2f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -32,17 +32,12 @@ #define MT7915_EEPROM_SIZE 3584 #define MT7915_TOKEN_SIZE 8192 -#define MT7915_TOKEN_FREE_THR 64 #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ #define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */ #define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */ -#define MT7915_SKU_RATE_NUM 161 
-#define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM -#define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1) - struct mt7915_vif; struct mt7915_sta; struct mt7915_dfs_pulse; @@ -108,11 +103,11 @@ struct mt7915_vif { }; struct mib_stats { - u16 ack_fail_cnt; - u16 fcs_err_cnt; - u16 rts_cnt; - u16 rts_retries_cnt; - u16 ba_miss_cnt; + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; }; struct mt7915_hif { @@ -142,7 +137,7 @@ struct mt7915_phy { u8 rdd_state; int dfs_state; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; struct mib_stats mib; @@ -191,16 +186,12 @@ struct mt7915_dev { u32 hw_pattern; - spinlock_t token_lock; - int token_count; - struct idr token; - - s8 **rate_power; /* TODO: use mt76_rate_power */ - bool dbdc_support; bool flash_mode; bool fw_debug; bool ibf; + + void *cal; }; enum { @@ -300,7 +291,7 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy); int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, struct ieee80211_channel *chan, u8 chain_idx); -void mt7915_eeprom_init_sku(struct mt7915_dev *dev); +s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band); int mt7915_dma_init(struct mt7915_dev *dev); void mt7915_dma_prefetch(struct mt7915_dev *dev); void mt7915_dma_cleanup(struct mt7915_dev *dev); @@ -350,7 +341,7 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band); int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val); int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter); int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable); -int mt7915_mcu_set_sku(struct mt7915_phy *phy); +int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy); int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev); int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev); int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev); @@ -359,6 +350,8 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, const struct mt7915_dfs_pulse *pulse); int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index, const struct mt7915_dfs_pattern *pattern); +int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev); +int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy); int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index); int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx); int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, @@ -394,80 +387,6 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask) mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } -static inline u32 -mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr) -{ - u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); - u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); - - mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base); - /* use read to push write */ - mt76_rr(dev, MT_HIF_REMAP_L1); - - return MT_HIF_REMAP_BASE_L1 + offset; -} - -static inline u32 -mt7915_l1_rr(struct mt7915_dev *dev, u32 addr) -{ - return mt76_rr(dev, mt7915_reg_map_l1(dev, addr)); -} - -static inline void -mt7915_l1_wr(struct mt7915_dev *dev, u32 addr, u32 val) -{ - mt76_wr(dev, mt7915_reg_map_l1(dev, addr), val); -} - -static inline u32 -mt7915_l1_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) -{ - val |= mt7915_l1_rr(dev, addr) & ~mask; - mt7915_l1_wr(dev, addr, val); - - return val; -} - -#define mt7915_l1_set(dev, addr, val) mt7915_l1_rmw(dev, addr, 0, val) -#define mt7915_l1_clear(dev, addr, val) mt7915_l1_rmw(dev, addr, val, 0) - -static 
inline u32 -mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr) -{ - u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); - u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); - - mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base); - /* use read to push write */ - mt76_rr(dev, MT_HIF_REMAP_L2); - - return MT_HIF_REMAP_BASE_L2 + offset; -} - -static inline u32 -mt7915_l2_rr(struct mt7915_dev *dev, u32 addr) -{ - return mt76_rr(dev, mt7915_reg_map_l2(dev, addr)); -} - -static inline void -mt7915_l2_wr(struct mt7915_dev *dev, u32 addr, u32 val) -{ - mt76_wr(dev, mt7915_reg_map_l2(dev, addr), val); -} - -static inline u32 -mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) -{ - val |= mt7915_l2_rr(dev, addr) & ~mask; - mt7915_l2_wr(dev, addr, val); - - return val; -} - -#define mt7915_l2_set(dev, addr, val) mt7915_l2_rmw(dev, addr, 0, val) -#define mt7915_l2_clear(dev, addr, val) mt7915_l2_rmw(dev, addr, val, 0) - bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask); void mt7915_mac_reset_counters(struct mt7915_phy *phy); void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); @@ -486,6 +405,7 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt7915_mac_work(struct work_struct *work); void mt7915_mac_reset_work(struct work_struct *work); void mt7915_mac_sta_rc_work(struct work_struct *work); +int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq); int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 13880cc9c9e8..643f171884cf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -154,28 +154,6 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance) return IRQ_HANDLED; } -static int -mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev) -{ -#define NUM_BANDS 2 - int i; - s8 **sku; - - sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL); - if (!sku) - return -ENOMEM; - - for (i = 0; i < NUM_BANDS; i++) { - sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE * - sizeof(**sku), GFP_KERNEL); - if (!sku[i]) - return -ENOMEM; - } - dev->rate_power = sku; - - return 0; -} - static void mt7915_pci_init_hif2(struct mt7915_dev *dev) { struct mt7915_hif *hif; @@ -201,7 +179,7 @@ static void mt7915_pci_init_hif2(struct mt7915_dev *dev) } /* master switch of PCIe tnterrupt enable */ - mt7915_l1_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); } static int mt7915_pci_hif2_probe(struct pci_dev *pdev) @@ -234,6 +212,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, + .token_size = MT7915_TOKEN_SIZE, .tx_prepare_skb = mt7915_tx_prepare_skb, .tx_complete_skb = mt7915_tx_complete_skb, .rx_skb = mt7915_queue_rx_skb, @@ -270,19 +249,13 @@ static int mt7915_pci_probe(struct pci_dev *pdev, return -ENOMEM; dev = container_of(mdev, struct mt7915_dev, mt76); - ret = mt7915_alloc_device(pdev, dev); + + ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq); if (ret) goto error; - mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); - mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) | - (mt7915_l1_rr(dev, MT_HW_REV) & 0xff); - dev_dbg(mdev->dev, "ASIC revision: %04x\n", 
mdev->rev); - - mt76_wr(dev, MT_INT_MASK_CSR, 0); - /* master switch of PCIe tnterrupt enable */ - mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index ed0c9a24bb53..efe0f2904c66 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -4,6 +4,11 @@ #ifndef __MT7915_REGS_H #define __MT7915_REGS_H +/* MCU WFDMA0 */ +#define MT_MCU_WFDMA0_BASE 0x2000 +#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs)) +#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120) + /* MCU WFDMA1 */ #define MT_MCU_WFDMA1_BASE 0x3000 #define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs)) @@ -77,6 +82,11 @@ #define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) #define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) +#define MT_TMAC_FP0R0(_band) MT_WF_TMAC(_band, 0x020) +#define MT_TMAC_FP0R15(_band) MT_WF_TMAC(_band, 0x080) +#define MT_TMAC_FP0R18(_band) MT_WF_TMAC(_band, 0x270) +#define MT_TMAC_FP_MASK GENMASK(7, 0) + #define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0) #define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00) @@ -396,6 +406,14 @@ #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1) #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2) +#define MT_TOP_RGU_BASE 0xf0000 +#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0)) +#define MT_TOP_PWR_KEY (0x5746 << 16) +#define MT_TOP_PWR_SW_RST BIT(0) +#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2) +#define MT_TOP_PWR_HW_CTRL BIT(4) +#define MT_TOP_PWR_PWR_ON BIT(7) + #define MT_INFRA_CFG_BASE 0xf1000 #define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c index bd798df748ba..f9d81e36ef09 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c @@ -257,13 +257,13 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time) { struct mt76_phy *mphy = phy->mt76; struct mt76_testmode_data *td = &mphy->test; - struct sk_buff *old = td->tx_skb, *new; struct ieee80211_supported_band *sband; struct rate_info rate = {}; u16 flags = 0, tx_len; u32 bitrate; + int ret; - if (!tx_time || !old) + if (!tx_time) return 0; rate.mcs = td->tx_rate_idx; @@ -323,21 +323,9 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time) bitrate = cfg80211_calculate_bitrate(&rate); tx_len = bitrate * tx_time / 10 / 8; - if (tx_len < sizeof(struct ieee80211_hdr)) - tx_len = sizeof(struct ieee80211_hdr); - else if (tx_len > IEEE80211_MAX_FRAME_LEN) - tx_len = IEEE80211_MAX_FRAME_LEN; - - new = alloc_skb(tx_len, GFP_KERNEL); - if (!new) - return -ENOMEM; - - skb_copy_header(new, old); - __skb_put_zero(new, tx_len); - memcpy(new->data, old->data, sizeof(struct ieee80211_hdr)); - - dev_kfree_skb(old); - td->tx_skb = new; + ret = mt76_testmode_alloc_skb(phy->mt76, tx_len); + if (ret) + return ret; return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile index 09d1446ad933..e531666f9fb4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile @@ -2,4 +2,6 @@ obj-$(CONFIG_MT7921E) += mt7921e.o -mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o +CFLAGS_trace.o := 
-I$(src) + +mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o trace.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c index 0dc8e25e18e4..6ee423dd4027 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c @@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val) { struct mt7921_dev *dev = data; - dev->fw_debug = (u8)val; + mt7921_mutex_acquire(dev); + dev->fw_debug = (u8)val; mt7921_mcu_fw_log_2_host(dev, dev->fw_debug); + mt7921_mutex_release(dev); + return 0; } @@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy, range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i)); for (i = 0; i < ARRAY_SIZE(bound); i++) - bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1; + bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1; seq_printf(file, "\nPhy0\n"); seq_printf(file, "Length: %8d | ", bound[0]); for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) - seq_printf(file, "%3d -%3d | ", - bound[i] + 1, bound[i + 1]); + seq_printf(file, "%3d %3d | ", bound[i] + 1, bound[i + 1]); seq_puts(file, "\nCount: "); for (i = 0; i < ARRAY_SIZE(bound); i++) @@ -62,7 +64,7 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy, } static int -mt7921_tx_stats_read(struct seq_file *file, void *data) +mt7921_tx_stats_show(struct seq_file *file, void *data) { struct mt7921_dev *dev = file->private; int stat[8], i, n; @@ -88,19 +90,7 @@ mt7921_tx_stats_read(struct seq_file *file, void *data) return 0; } -static int -mt7921_tx_stats_open(struct inode *inode, struct file *f) -{ - return single_open(f, mt7921_tx_stats_read, inode->i_private); -} - -static const struct file_operations fops_tx_stats = { - .open = mt7921_tx_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(mt7921_tx_stats); static int mt7921_queues_acq(struct seq_file *s, void *data) @@ -159,23 +149,107 @@ mt7921_queues_read(struct seq_file *s, void *data) return 0; } +static void +mt7921_seq_puts_array(struct seq_file *file, const char *str, + s8 *val, int len) +{ + int i; + + seq_printf(file, "%-16s:", str); + for (i = 0; i < len; i++) + if (val[i] == 127) + seq_printf(file, " %6s", "N.A"); + else + seq_printf(file, " %6d", val[i]); + seq_puts(file, "\n"); +} + +#define mt7921_print_txpwr_entry(prefix, rate) \ +({ \ + mt7921_seq_puts_array(s, #prefix " (user)", \ + txpwr.data[TXPWR_USER].rate, \ + ARRAY_SIZE(txpwr.data[TXPWR_USER].rate)); \ + mt7921_seq_puts_array(s, #prefix " (eeprom)", \ + txpwr.data[TXPWR_EEPROM].rate, \ + ARRAY_SIZE(txpwr.data[TXPWR_EEPROM].rate)); \ + mt7921_seq_puts_array(s, #prefix " (tmac)", \ + txpwr.data[TXPWR_MAC].rate, \ + ARRAY_SIZE(txpwr.data[TXPWR_MAC].rate)); \ +}) + +static int +mt7921_txpwr(struct seq_file *s, void *data) +{ + struct mt7921_dev *dev = dev_get_drvdata(s->private); + struct mt7921_txpwr txpwr; + int ret; + + ret = mt7921_get_txpwr_info(dev, &txpwr); + if (ret) + return ret; + + seq_printf(s, "Tx power table (channel %d)\n", txpwr.ch); + seq_printf(s, "%-16s %6s %6s %6s %6s\n", + " ", "1m", "2m", "5m", "11m"); + mt7921_print_txpwr_entry(CCK, cck); + + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "6m", "9m", "12m", "18m", "24m", "36m", + "48m", "54m"); + mt7921_print_txpwr_entry(OFDM, ofdm); + + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7"); + 
mt7921_print_txpwr_entry(HT20, ht20); + + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs32"); + mt7921_print_txpwr_entry(HT40, ht40); + + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11"); + mt7921_print_txpwr_entry(VHT20, vht20); + mt7921_print_txpwr_entry(VHT40, vht40); + mt7921_print_txpwr_entry(VHT80, vht80); + mt7921_print_txpwr_entry(VHT160, vht160); + mt7921_print_txpwr_entry(HE26, he26); + mt7921_print_txpwr_entry(HE52, he52); + mt7921_print_txpwr_entry(HE106, he106); + mt7921_print_txpwr_entry(HE242, he242); + mt7921_print_txpwr_entry(HE484, he484); + mt7921_print_txpwr_entry(HE996, he996); + mt7921_print_txpwr_entry(HE996x2, he996x2); + + return 0; +} + static int mt7921_pm_set(void *data, u64 val) { struct mt7921_dev *dev = data; + struct mt76_connac_pm *pm = &dev->pm; struct mt76_phy *mphy = dev->phy.mt76; - int ret = 0; + + if (val == pm->enable) + return 0; mt7921_mutex_acquire(dev); - dev->pm.enable = val; + if (!pm->enable) { + pm->stats.last_wake_event = jiffies; + pm->stats.last_doze_event = jiffies; + } + pm->enable = val; ieee80211_iterate_active_interfaces(mphy->hw, IEEE80211_IFACE_ITER_RESUME_ALL, mt7921_pm_interface_iter, mphy->priv); mt7921_mutex_release(dev); - return ret; + return 0; } static int @@ -191,6 +265,29 @@ mt7921_pm_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n"); static int +mt7921_pm_stats(struct seq_file *s, void *data) +{ + struct mt7921_dev *dev = dev_get_drvdata(s->private); + struct mt76_connac_pm *pm = &dev->pm; + + unsigned long awake_time = pm->stats.awake_time; + unsigned long doze_time = pm->stats.doze_time; + + if (!test_bit(MT76_STATE_PM, &dev->mphy.state)) + awake_time += jiffies - pm->stats.last_wake_event; + else + doze_time += jiffies - pm->stats.last_doze_event; + + seq_printf(s, "awake time: %14u\ndoze time: %15u\n", + jiffies_to_msecs(awake_time), + jiffies_to_msecs(doze_time)); + + seq_printf(s, "low power wakes: %9d\n", pm->stats.lp_wake); + + return 0; +} + +static int mt7921_pm_idle_timeout_set(void *data, u64 val) { struct mt7921_dev *dev = data; @@ -213,19 +310,28 @@ mt7921_pm_idle_timeout_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get, mt7921_pm_idle_timeout_set, "%lld\n"); -static int mt7921_config(void *data, u64 val) +static int mt7921_chip_reset(void *data, u64 val) { struct mt7921_dev *dev = data; - int ret; + int ret = 0; - mt7921_mutex_acquire(dev); - ret = mt76_connac_mcu_chip_config(&dev->mt76); - mt7921_mutex_release(dev); + switch (val) { + case 1: + /* Reset wifisys directly. */ + mt7921_reset(&dev->mt76); + break; + default: + /* Collect the core dump before reset wifisys. 
*/ + mt7921_mutex_acquire(dev); + ret = mt76_connac_mcu_chip_config(&dev->mt76); + mt7921_mutex_release(dev); + break; + } return ret; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7921_config, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n"); int mt7921_init_debugfs(struct mt7921_dev *dev) { @@ -239,12 +345,16 @@ int mt7921_init_debugfs(struct mt7921_dev *dev) mt7921_queues_read); debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, mt7921_queues_acq); - debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats); + debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, + mt7921_txpwr); + debugfs_create_file("tx_stats", 0400, dir, dev, &mt7921_tx_stats_fops); debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); debugfs_create_file("idle-timeout", 0600, dir, dev, &fops_pm_idle_timeout); - debugfs_create_file("chip_config", 0600, dir, dev, &fops_config); + debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset); + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, + mt7921_pm_stats); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c index cd9665610284..71e664ee7652 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c @@ -53,8 +53,7 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, } } -static void -mt7921_tx_cleanup(struct mt7921_dev *dev) +void mt7921_tx_cleanup(struct mt7921_dev *dev) { mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false); @@ -66,15 +65,39 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget) dev = container_of(napi, struct mt7921_dev, mt76.tx_napi); - mt7921_tx_cleanup(dev); + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } - if (napi_complete_done(napi, 0)) + mt7921_tx_cleanup(dev); + if (napi_complete(napi)) mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL); + mt76_connac_pm_unref(&dev->pm); return 0; } -void mt7921_dma_prefetch(struct mt7921_dev *dev) +static int mt7921_poll_rx(struct napi_struct *napi, int budget) +{ + struct mt7921_dev *dev; + int done; + + dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } + done = mt76_dma_rx_poll(napi, budget); + mt76_connac_pm_unref(&dev->pm); + + return done; +} + +static void mt7921_dma_prefetch(struct mt7921_dev *dev) { #define PREFETCH(base, depth) ((base) << 16 | (depth)) @@ -198,11 +221,160 @@ static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) return dev->bus_ops->rmw(mdev, addr, mask, val); } -static int mt7921_dmashdl_disabled(struct mt7921_dev *dev) +static int mt7921_dma_disable(struct mt7921_dev *dev, bool force) { - mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); + if (force) { + /* reset */ + mt76_clear(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + + mt76_set(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + } + + /* disable dmashdl */ + mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, + MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS); + /* 
disable WFDMA0 */ + mt76_clear(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000)) + return -ETIMEDOUT; + + return 0; +} + +static int mt7921_dma_enable(struct mt7921_dev *dev) +{ + /* configure perfetch settings */ + mt7921_dma_prefetch(dev); + + /* reset dma idx */ + mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); + + /* configure delay interrupt */ + mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); + + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_WB_DDONE | + MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | + MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); + + /* enable interrupts for TX/RX rings */ + mt7921_irq_enable(dev, + MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | + MT_INT_MCU_CMD); + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); + + return 0; +} + +static int mt7921_dma_reset(struct mt7921_dev *dev, bool force) +{ + int i, err; + + err = mt7921_dma_disable(dev, force); + if (err) + return err; + + /* reset hw queues */ + for (i = 0; i < __MT_TXQ_MAX; i++) + mt76_queue_reset(dev, dev->mphy.q_tx[i]); + + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_reset(dev, dev->mt76.q_mcu[i]); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_reset(dev, &dev->mt76.q_rx[i]); + + mt76_tx_status_check(&dev->mt76, NULL, true); + + return mt7921_dma_enable(dev); +} + +int mt7921_wfsys_reset(struct mt7921_dev *dev) +{ + mt76_set(dev, 0x70002600, BIT(0)); + msleep(200); + mt76_clear(dev, 0x70002600, BIT(0)); + + if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B, + WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500)) + return -ETIMEDOUT; + + return 0; +} + +int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force) +{ + int i, err; + + /* clean up hw queues */ + for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); + + if (force) { + err = mt7921_wfsys_reset(dev); + if (err) + return err; + } + err = mt7921_dma_reset(dev, force); + if (err) + return err; + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_reset(dev, i); + + return 0; +} + +int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev) +{ + struct mt76_connac_pm *pm = &dev->pm; + int err; + + /* check if the wpdma must be reinitialized */ + if (mt7921_dma_need_reinit(dev)) { + /* disable interrutpts */ + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + err = mt7921_wpdma_reset(dev, false); + if (err) { + dev_err(dev->mt76.dev, "wpdma reset failed\n"); + return err; + } + + /* enable interrutpts */ + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + pm->stats.lp_wake++; + } + return 0; } @@ -226,32 +398,10 @@ int mt7921_dma_init(struct mt7921_dev *dev) mt76_dma_attach(&dev->mt76); - /* reset */ - mt76_clear(dev, MT_WFDMA0_RST, - MT_WFDMA0_RST_DMASHDL_ALL_RST | - MT_WFDMA0_RST_LOGIC_RST); 
- - mt76_set(dev, MT_WFDMA0_RST, - MT_WFDMA0_RST_DMASHDL_ALL_RST | - MT_WFDMA0_RST_LOGIC_RST); - - ret = mt7921_dmashdl_disabled(dev); + ret = mt7921_dma_disable(dev, true); if (ret) return ret; - /* disable WFDMA0 */ - mt76_clear(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - mt76_poll(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | - MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000); - /* init tx queue */ ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0, MT7921_TX_RING_SIZE); @@ -295,41 +445,15 @@ int mt7921_dma_init(struct mt7921_dev *dev) if (ret) return ret; - ret = mt76_init_queues(dev); + ret = mt76_init_queues(dev, mt7921_poll_rx); if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7921_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); - /* configure perfetch settings */ - mt7921_dma_prefetch(dev); - - /* reset dma idx */ - mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); - - /* configure delay interrupt */ - mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); - - mt76_set(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_WB_DDONE | - MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | - MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - mt76_set(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); - - mt76_set(dev, 0x54000120, BIT(1)); - - /* enable interrupts for TX/RX rings */ - mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | - MT_INT_MCU_CMD); - - return 0; + return mt7921_dma_enable(dev); } void mt7921_dma_cleanup(struct mt7921_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index 89a13b4a74a4..fe28bf4050c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -58,12 +58,14 @@ mt7921_regd_notifier(struct wiphy *wiphy, { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); dev->mt76.region = request->dfs_region; mt7921_mutex_acquire(dev); mt76_connac_mcu_set_channel_domain(hw->priv); + mt76_connac_mcu_set_rate_txpower(phy->mt76); mt7921_mutex_release(dev); } @@ -77,6 +79,9 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; + phy->slottime = 9; hw->sta_data_size = sizeof(struct mt7921_sta); @@ -95,6 +100,7 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->reg_notifier = mt7921_regd_notifier; + wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); @@ -142,7 +148,7 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band) mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); } -static void mt7921_mac_init(struct mt7921_dev *dev) +void mt7921_mac_init(struct mt7921_dev *dev) { int i; 
@@ -160,23 +166,10 @@ static void mt7921_mac_init(struct mt7921_dev *dev) mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0); } -static void mt7921_init_work(struct work_struct *work) -{ - struct mt7921_dev *dev = container_of(work, struct mt7921_dev, - init_work); - - mt7921_mcu_set_eeprom(dev); - mt7921_mac_init(dev); -} - static int mt7921_init_hardware(struct mt7921_dev *dev) { int ret, idx; - INIT_WORK(&dev->init_work, mt7921_init_work); - spin_lock_init(&dev->token_lock); - idr_init(&dev->token); - ret = mt7921_dma_init(dev); if (ret) return ret; @@ -196,6 +189,10 @@ static int mt7921_init_hardware(struct mt7921_dev *dev) if (ret < 0) return ret; + ret = mt7921_mcu_set_eeprom(dev); + if (ret) + return ret; + /* Beacon and mgmt frames should occupy wcid 0 */ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); if (idx) @@ -206,6 +203,8 @@ static int mt7921_init_hardware(struct mt7921_dev *dev) dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + mt7921_mac_init(dev); + return 0; } @@ -217,10 +216,13 @@ int mt7921_register_device(struct mt7921_dev *dev) dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; + dev->mt76.tx_worker.fn = mt7921_tx_worker; INIT_DELAYED_WORK(&dev->pm.ps_work, mt7921_pm_power_save_work); INIT_WORK(&dev->pm.wake_work, mt7921_pm_wake_work); - init_completion(&dev->pm.wake_cmpl); + spin_lock_init(&dev->pm.wake.lock); + mutex_init(&dev->pm.mutex); + init_waitqueue_head(&dev->pm.wait); spin_lock_init(&dev->pm.txq_lock); set_bit(MT76_STATE_PM, &dev->mphy.state); INIT_LIST_HEAD(&dev->phy.stats_list); @@ -232,15 +234,17 @@ int mt7921_register_device(struct mt7921_dev *dev) INIT_LIST_HEAD(&dev->sta_poll_list); spin_lock_init(&dev->sta_poll_lock); - init_waitqueue_head(&dev->reset_wait); INIT_WORK(&dev->reset_work, mt7921_mac_reset_work); + dev->pm.idle_timeout = MT7921_PM_TIMEOUT; + dev->pm.stats.last_wake_event = jiffies; + dev->pm.stats.last_doze_event = jiffies; + ret = mt7921_init_hardware(dev); if (ret) return ret; mt7921_init_wiphy(hw); - dev->pm.idle_timeout = MT7921_PM_TIMEOUT; dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; @@ -250,9 +254,6 @@ int mt7921_register_device(struct mt7921_dev *dev) dev->mphy.sband_5g.sband.vht_cap.cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; @@ -264,18 +265,15 @@ int mt7921_register_device(struct mt7921_dev *dev) if (ret) return ret; - ieee80211_queue_work(mt76_hw(dev), &dev->init_work); - return mt7921_init_debugfs(dev); } void mt7921_unregister_device(struct mt7921_dev *dev) { mt76_unregister_device(&dev->mt76); - mt7921_mcu_exit(dev); - mt7921_dma_cleanup(dev); - mt7921_tx_token_put(dev); + mt7921_dma_cleanup(dev); + mt7921_mcu_exit(dev); tasklet_disable(&dev->irq_tasklet); mt76_free_device(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 3f9097481a5e..214bd1859792 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -9,8 +9,6 @@ #include "mac.h" #include "mcu.h" -#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 
220) / 2) - #define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) #define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ IEEE80211_RADIOTAP_HE_##f) @@ -51,14 +49,6 @@ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) 0, 5000); } -static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid) -{ - mt76_wr(dev, MT_WTBLON_TOP_WDUCR, - FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); - - return MT_WTBL_LMAC_OFFS(wcid, 0); -} - static void mt7921_mac_sta_poll(struct mt7921_dev *dev) { static const u8 ac_to_tid[] = { @@ -95,7 +85,7 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev) spin_unlock_bh(&dev->sta_poll_lock); idx = msta->wcid.idx; - addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4; + addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4; for (i = 0; i < IEEE80211_NUM_ACS; i++) { u32 tx_last = msta->airtime_ac[i]; @@ -285,6 +275,37 @@ mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy, status->freq = ieee80211_channel_to_frequency(chfreq, status->band); } +static void +mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct sk_buff *skb = priv; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (status->signal > 0) + return; + + if (!ether_addr_equal(vif->addr, hdr->addr1)) + return; + + ewma_rssi_add(&mvif->rssi, -status->signal); +} + +static void +mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (!ieee80211_is_assoc_resp(hdr->frame_control) && + !ieee80211_is_auth(hdr->frame_control)) + return; + + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7921_mac_rssi_iter, skb); +} + int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -349,19 +370,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; } - if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (phy->rx_ampdu_ts != rxd[14]) { - if (!++phy->ampdu_ref) - phy->ampdu_ref++; - } - phy->rx_ampdu_ts = rxd[14]; - - status->ampdu_ref = phy->ampdu_ref; - } - remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) @@ -393,6 +401,22 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) } if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -400,7 +424,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) /* RXD Group 3 - P-RXV */ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { - u32 v0, v1, v2; + u8 stbc, gi; + u32 v0, v1; + bool cck; rxv = rxd; rxd += 2; @@ -409,7 +435,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) v0 = le32_to_cpu(rxv[0]); v1 = le32_to_cpu(rxv[1]); 
- v2 = le32_to_cpu(rxv[2]); if (v0 & MT_PRXV_HT_AD_CODE) status->enc_flags |= RX_ENC_FLAG_LDPC; @@ -429,87 +454,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->chain_signal[i]); } - /* RXD Group 5 - C-RXV */ - if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { - u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2); - u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2); - bool cck = false; + stbc = FIELD_GET(MT_PRXV_STBC, v0); + gi = FIELD_GET(MT_PRXV_SGI, v0); + cck = false; - rxd += 18; - if ((u8 *)rxd - skb->data >= skb->len) - return -EINVAL; + idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); + mode = FIELD_GET(MT_PRXV_TX_MODE, v0); - idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); - mode = FIELD_GET(MT_CRXV_TX_MODE, v2); - - switch (mode) { - case MT_PHY_TYPE_CCK: - cck = true; - fallthrough; - case MT_PHY_TYPE_OFDM: - i = mt76_get_rate(&dev->mt76, sband, i, cck); - break; - case MT_PHY_TYPE_HT_GF: - case MT_PHY_TYPE_HT: - status->encoding = RX_ENC_HT; - if (i > 31) - return -EINVAL; - break; - case MT_PHY_TYPE_VHT: - status->nss = - FIELD_GET(MT_PRXV_NSTS, v0) + 1; - status->encoding = RX_ENC_VHT; - if (i > 9) - return -EINVAL; - break; - case MT_PHY_TYPE_HE_MU: - status->flag |= RX_FLAG_RADIOTAP_HE_MU; - fallthrough; - case MT_PHY_TYPE_HE_SU: - case MT_PHY_TYPE_HE_EXT_SU: - case MT_PHY_TYPE_HE_TB: - status->nss = - FIELD_GET(MT_PRXV_NSTS, v0) + 1; - status->encoding = RX_ENC_HE; - status->flag |= RX_FLAG_RADIOTAP_HE; - i &= GENMASK(3, 0); - - if (gi <= NL80211_RATE_INFO_HE_GI_3_2) - status->he_gi = gi; - - status->he_dcm = !!(idx & MT_PRXV_TX_DCM); - break; - default: + switch (mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (i > 31) return -EINVAL; - } - status->rate_idx = i; - - switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) { - case IEEE80211_STA_RX_BW_20: - break; - case IEEE80211_STA_RX_BW_40: - if (mode & MT_PHY_TYPE_HE_EXT_SU && - (idx & MT_PRXV_TX_ER_SU_106T)) { - status->bw = RATE_INFO_BW_HE_RU; - status->he_ru = - NL80211_RATE_INFO_HE_RU_ALLOC_106; - } else { - status->bw = RATE_INFO_BW_40; - } - break; - case IEEE80211_STA_RX_BW_80: - status->bw = RATE_INFO_BW_80; - break; - case IEEE80211_STA_RX_BW_160: - status->bw = RATE_INFO_BW_160; - break; - default: + break; + case MT_PHY_TYPE_VHT: + status->nss = + FIELD_GET(MT_PRXV_NSTS, v0) + 1; + status->encoding = RX_ENC_VHT; + if (i > 9) return -EINVAL; + break; + case MT_PHY_TYPE_HE_MU: + status->flag |= RX_FLAG_RADIOTAP_HE_MU; + fallthrough; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + status->nss = + FIELD_GET(MT_PRXV_NSTS, v0) + 1; + status->encoding = RX_ENC_HE; + status->flag |= RX_FLAG_RADIOTAP_HE; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_HE_GI_3_2) + status->he_gi = gi; + + status->he_dcm = !!(idx & MT_PRXV_TX_DCM); + break; + default: + return -EINVAL; + } + + status->rate_idx = i; + + switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) { + case IEEE80211_STA_RX_BW_20: + break; + case IEEE80211_STA_RX_BW_40: + if (mode & MT_PHY_TYPE_HE_EXT_SU && + (idx & MT_PRXV_TX_ER_SU_106T)) { + status->bw = RATE_INFO_BW_HE_RU; + status->he_ru = + NL80211_RATE_INFO_HE_RU_ALLOC_106; + } else { + status->bw = RATE_INFO_BW_40; } + break; + case IEEE80211_STA_RX_BW_80: + status->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_160: + status->bw = RATE_INFO_BW_160; + break; + default: + return -EINVAL; + } - status->enc_flags 
|= RX_ENC_FLAG_STBC_MASK * stbc; - if (mode < MT_PHY_TYPE_HE_SU && gi) - status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; + if (mode < MT_PHY_TYPE_HE_SU && gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { + rxd += 18; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; } } @@ -521,6 +546,8 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) mt76_insert_ccmp_hdr(skb, key_id); } + mt7921_mac_assoc_rssi(dev, skb); + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) mt7921_mac_decode_he_radiotap(skb, status, rxv, mode); @@ -530,7 +557,7 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->aggr = unicast && !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + status->qos_ctl = *ieee80211_get_qos_ctl(hdr); status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); return 0; @@ -758,20 +785,6 @@ mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info, } } -static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked) -{ - struct mt76_phy *mphy = &dev->mphy; - struct mt76_queue *q; - - q = mphy->q_tx[0]; - if (blocked == q->blocked) - return; - - q->blocked = blocked; - if (!blocked) - mt76_worker_schedule(&dev->mt76.tx_worker); -} - int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, @@ -797,15 +810,7 @@ int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); t->skb = tx_info->skb; - spin_lock_bh(&dev->token_lock); - id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC); - if (id >= 0) - dev->token_count++; - - if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR) - mt7921_set_tx_blocked(dev, true); - spin_unlock_bh(&dev->token_lock); - + id = mt76_token_consume(mdev, &t); if (id < 0) return id; @@ -967,15 +972,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb) msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); stat = FIELD_GET(MT_TX_FREE_STATUS, info); - spin_lock_bh(&dev->token_lock); - txwi = idr_remove(&dev->token, msdu); - if (txwi) - dev->token_count--; - if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR && - dev->mphy.q_tx[0]->blocked) - wake = true; - spin_unlock_bh(&dev->token_lock); - + txwi = mt76_token_release(mdev, msdu, &wake); if (!txwi) continue; @@ -1003,11 +1000,8 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb) mt76_put_txwi(mdev, txwi); } - if (wake) { - spin_lock_bh(&dev->token_lock); - mt7921_set_tx_blocked(dev, false); - spin_unlock_bh(&dev->token_lock); - } + if (wake) + mt76_set_tx_blocked(&dev->mt76, false); napi_consume_skb(skb, 1); @@ -1016,13 +1010,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb) napi_consume_skb(skb, 1); } - if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state)) - return; - mt7921_mac_sta_poll(dev); - - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); - mt76_worker_schedule(&dev->mt76.tx_worker); } @@ -1044,11 +1032,8 @@ void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) u16 token; txp = mt7921_txwi_to_txp(mdev, e->txwi); - token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID; - spin_lock_bh(&dev->token_lock); - t = idr_remove(&dev->token, token); - spin_unlock_bh(&dev->token_lock); + t = mt76_token_put(mdev, token); e->skb = 
t ? t->skb : NULL; } @@ -1183,52 +1168,13 @@ void mt7921_update_channel(struct mt76_dev *mdev) mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } -static bool -mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state) -{ - bool ret; - - ret = wait_event_timeout(dev->reset_wait, - (READ_ONCE(dev->reset_state) & state), - MT7921_RESET_TIMEOUT); - - WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); - return ret; -} - -static void -mt7921_dma_reset(struct mt7921_phy *phy) -{ - struct mt7921_dev *dev = phy->dev; - int i; - - mt76_clear(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); - - usleep_range(1000, 2000); - - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true); - for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true); - - mt76_for_each_q_rx(&dev->mt76, i) { - mt76_queue_rx_reset(dev, i); - } - - /* re-init prefetch settings after reset */ - mt7921_dma_prefetch(dev); - - mt76_set(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); -} - void mt7921_tx_token_put(struct mt7921_dev *dev) { struct mt76_txwi_cache *txwi; int id; - spin_lock_bh(&dev->token_lock); - idr_for_each_entry(&dev->token, txwi, id) { + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) { mt7921_txp_skb_unmap(&dev->mt76, txwi); if (txwi->skb) { struct ieee80211_hw *hw; @@ -1237,77 +1183,127 @@ void mt7921_tx_token_put(struct mt7921_dev *dev) ieee80211_free_txskb(hw, txwi->skb); } mt76_put_txwi(&dev->mt76, txwi); - dev->token_count--; + dev->mt76.token_count--; } - spin_unlock_bh(&dev->token_lock); - idr_destroy(&dev->token); + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); } -/* system error recovery */ -void mt7921_mac_reset_work(struct work_struct *work) +static void +mt7921_vif_connect_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) { - struct mt7921_dev *dev; + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mvif->phy->dev; - dev = container_of(work, struct mt7921_dev, reset_work); + ieee80211_disconnect(vif, true); - if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA)) - return; + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); + mt7921_mcu_set_tx(dev, vif); +} + +static int +mt7921_mac_reset(struct mt7921_dev *dev) +{ + int i, err; - ieee80211_stop_queues(mt76_hw(dev)); + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); - cancel_delayed_work_sync(&dev->mphy.mac_work); + skb_queue_purge(&dev->mt76.mcu.res_q); - /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); mt76_worker_disable(&dev->mt76.tx_worker); - napi_disable(&dev->mt76.napi[0]); - napi_disable(&dev->mt76.napi[1]); - napi_disable(&dev->mt76.napi[2]); + napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); + napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); + napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); napi_disable(&dev->mt76.tx_napi); - mt7921_mutex_acquire(dev); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); - mt7921_tx_token_put(dev); - idr_init(&dev->token); + idr_init(&dev->mt76.token); - if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7921_dma_reset(&dev->phy); + err = mt7921_wpdma_reset(dev, true); + if (err) + return err; - 
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); - mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); + mt76_for_each_q_rx(&dev->mt76, i) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); } - clear_bit(MT76_MCU_RESET, &dev->mphy.state); - clear_bit(MT76_RESET, &dev->mphy.state); - - mt76_worker_enable(&dev->mt76.tx_worker); napi_enable(&dev->mt76.tx_napi); napi_schedule(&dev->mt76.tx_napi); + mt76_worker_enable(&dev->mt76.tx_worker); + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + clear_bit(MT76_STATE_PM, &dev->mphy.state); + + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + err = mt7921_run_firmware(dev); + if (err) + return err; + + err = mt7921_mcu_set_eeprom(dev); + if (err) + return err; - napi_enable(&dev->mt76.napi[0]); - napi_schedule(&dev->mt76.napi[0]); + mt7921_mac_init(dev); + return __mt7921_start(&dev->phy); +} - napi_enable(&dev->mt76.napi[1]); - napi_schedule(&dev->mt76.napi[1]); +/* system error recovery */ +void mt7921_mac_reset_work(struct work_struct *work) +{ + struct ieee80211_hw *hw; + struct mt7921_dev *dev; + int i; - napi_enable(&dev->mt76.napi[2]); - napi_schedule(&dev->mt76.napi[2]); + dev = container_of(work, struct mt7921_dev, reset_work); + hw = mt76_hw(dev); - ieee80211_wake_queues(mt76_hw(dev)); + dev_err(dev->mt76.dev, "chip reset\n"); + ieee80211_stop_queues(hw); - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); - mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + cancel_delayed_work_sync(&dev->mphy.mac_work); + cancel_delayed_work_sync(&dev->pm.ps_work); + cancel_work_sync(&dev->pm.wake_work); - mt7921_mutex_release(dev); + mutex_lock(&dev->mt76.mutex); + for (i = 0; i < 10; i++) { + if (!mt7921_mac_reset(dev)) + break; + } + mutex_unlock(&dev->mt76.mutex); - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, - MT7921_WATCHDOG_TIME); + if (i == 10) + dev_err(dev->mt76.dev, "chip reset failed\n"); + + if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(dev->mphy.hw, &info); + } + + ieee80211_wake_queues(hw); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7921_vif_connect_iter, NULL); +} + +void mt7921_reset(struct mt76_dev *mdev) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + queue_work(dev->mt76.wq, &dev->reset_work); } static void @@ -1317,31 +1313,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy) struct mib_stats *mib = &phy->mib; int i, aggr0 = 0, aggr1; - memset(mib, 0, sizeof(*mib)); - - mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0), - MT_MIB_SDR3_FCS_ERR_MASK); + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0), + MT_MIB_SDR3_FCS_ERR_MASK); + mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0), + MT_MIB_ACK_FAIL_COUNT_MASK); + mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0), + MT_MIB_BA_FAIL_COUNT_MASK); + mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0), + MT_MIB_RTS_COUNT_MASK); + mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), + MT_MIB_RTS_FAIL_COUNT_MASK); for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { u32 val, val2; - val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i)); - - val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); - if (val2 > mib->ack_fail_cnt) - mib->ack_fail_cnt = val2; - - val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); - if (val2 > mib->ba_miss_cnt) - mib->ba_miss_cnt = val2; - - val = mt76_rr(dev, 
MT_MIB_MB_SDR0(0, i)); - val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); - if (val2 > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); - mib->rts_retries_cnt = val2; - } - val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); @@ -1385,25 +1370,20 @@ void mt7921_mac_work(struct work_struct *work) mac_work.work); phy = mphy->priv; - if (test_bit(MT76_STATE_PM, &mphy->state)) - goto out; - mt7921_mutex_acquire(phy->dev); mt76_update_survey(mphy->dev); - if (++mphy->mac_work_count == 5) { + if (++mphy->mac_work_count == 2) { mphy->mac_work_count = 0; mt7921_mac_update_mib_stats(phy); } - if (++phy->sta_work_count == 10) { + if (++phy->sta_work_count == 4) { phy->sta_work_count = 0; mt7921_mac_sta_stats_work(phy); - }; + } mt7921_mutex_release(phy->dev); - -out: ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work, MT7921_WATCHDOG_TIME); } @@ -1417,13 +1397,19 @@ void mt7921_pm_wake_work(struct work_struct *work) pm.wake_work); mphy = dev->phy.mt76; - if (!mt7921_mcu_drv_pmctrl(dev)) + if (!mt7921_mcu_drv_pmctrl(dev)) { + int i; + + mt76_for_each_q_rx(&dev->mt76, i) + napi_schedule(&dev->mt76.napi[i]); mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); - else - dev_err(mphy->dev->dev, "failed to wake device\n"); + mt7921_tx_cleanup(dev); + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + MT7921_WATCHDOG_TIME); + } ieee80211_wake_queues(mphy->hw); - complete_all(&dev->pm.wake_cmpl); + wake_up(&dev->pm.wait); } void mt7921_pm_power_save_work(struct work_struct *work) @@ -1435,6 +1421,10 @@ void mt7921_pm_power_save_work(struct work_struct *work) pm.ps_work.work); delta = dev->pm.idle_timeout; + if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) || + test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) + goto out; + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { delta = dev->pm.last_activity + delta - jiffies; goto out; @@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work) break; skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); - if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) - break; + if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } memcpy(data, skb->data, skb->len); data += skb->len; @@ -1513,4 +1505,5 @@ void mt7921_coredump_work(struct work_struct *work) } dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, GFP_KERNEL); + mt7921_reset(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h index a0c1fa0f20e4..109c8849d106 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h @@ -97,18 +97,24 @@ enum rx_pkt_type { #define MT_RXD3_NORMAL_PF_MODE BIT(29) #define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30) -/* P-RXV */ +/* P-RXV DW0 */ #define MT_PRXV_TX_RATE GENMASK(6, 0) #define MT_PRXV_TX_DCM BIT(4) #define MT_PRXV_TX_ER_SU_106T BIT(5) #define MT_PRXV_NSTS GENMASK(9, 7) #define MT_PRXV_HT_AD_CODE BIT(11) +#define MT_PRXV_FRAME_MODE GENMASK(14, 12) +#define MT_PRXV_SGI GENMASK(16, 15) +#define MT_PRXV_STBC GENMASK(23, 22) +#define MT_PRXV_TX_MODE GENMASK(27, 24) #define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28) -#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0) + +/* P-RXV DW1 */ #define MT_PRXV_RCPI3 GENMASK(31, 24) #define MT_PRXV_RCPI2 GENMASK(23, 16) #define MT_PRXV_RCPI1 GENMASK(15, 8) #define MT_PRXV_RCPI0 GENMASK(7, 0) +#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0) /* C-RXV */ #define MT_CRXV_HT_STBC GENMASK(1, 0) 
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 729f6c42cdde..f4c27aa41048 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -65,9 +65,9 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, IEEE80211_HE_MAC_CAP0_HTC_HE; he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED; + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; he_cap_elem->mac_cap_info[4] = - IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU; + IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; if (band == NL80211_BAND_2GHZ) he_cap_elem->phy_cap_info[0] = @@ -108,7 +108,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; he_cap_elem->phy_cap_info[7] |= - IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; he_cap_elem->phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | @@ -125,10 +125,6 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); - he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); - he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); - he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map); - he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map); memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); if (he_cap_elem->phy_cap_info[6] & @@ -169,28 +165,48 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy) } } -static int mt7921_start(struct ieee80211_hw *hw) +int __mt7921_start(struct mt7921_phy *phy) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt76_phy *mphy = phy->mt76; + int err; - mt7921_mutex_acquire(dev); + err = mt76_connac_mcu_set_mac_enable(mphy->dev, 0, true, false); + if (err) + return err; - mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); - mt76_connac_mcu_set_channel_domain(phy->mt76); + err = mt76_connac_mcu_set_channel_domain(mphy); + if (err) + return err; + + err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + if (err) + return err; + + err = mt76_connac_mcu_set_rate_txpower(phy->mt76); + if (err) + return err; - mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); mt7921_mac_reset_counters(phy); - set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + set_bit(MT76_STATE_RUNNING, &mphy->state); - ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT7921_WATCHDOG_TIME); - mt7921_mutex_release(dev); - return 0; } +static int mt7921_start(struct ieee80211_hw *hw) +{ + struct mt7921_phy *phy = mt7921_hw_phy(hw); + int err; + + mt7921_mutex_acquire(phy->dev); + err = __mt7921_start(phy); + mt7921_mutex_release(phy->dev); + + return err; +} + static void mt7921_stop(struct ieee80211_hw *hw) { struct mt7921_dev *dev = mt7921_hw_dev(hw); @@ -224,9 +240,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask) if (i) return i - 1; - if (type != NL80211_IFTYPE_STATION) - break; - /* next, try to find a free repeater entry for the sta */ i = get_free_idx(mask >> REPEATER_BSSID_START, 0, REPEATER_BSSID_MAX - REPEATER_BSSID_START); @@ -295,15 +308,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, if (ret) goto out; - if (dev->pm.enable) { - ret = 
mt7921_mcu_set_bss_pm(dev, vif, true); - if (ret) - goto out; - - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; - mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); - } - dev->mt76.vif_mask |= BIT(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); @@ -318,6 +322,8 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + ewma_rssi_init(&mvif->rssi); + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; @@ -348,19 +354,12 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw, if (vif == phy->monitor_vif) phy->monitor_vif = NULL; + mt7921_mutex_acquire(dev); mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - - if (dev->pm.enable) { - mt7921_mcu_set_bss_pm(dev, vif, false); - mt76_clear(dev, MT_WF_RFCR(0), - MT_WF_RFCR_DROP_OTHER_BEACON); - } - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - mt7921_mutex_acquire(dev); dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx); phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mt7921_mutex_release(dev); @@ -396,8 +395,7 @@ out: clear_bit(MT76_RESET, &phy->mt76->state); mt7921_mutex_release(dev); - mt76_txq_schedule_all(phy->mt76); - + mt76_worker_schedule(&dev->mt76.tx_worker); ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, MT7921_WATCHDOG_TIME); @@ -413,7 +411,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; - int idx = key->keyidx; + u8 *wcid_keyidx = &wcid->hw_key_idx; + int idx = key->keyidx, err = 0; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. @@ -429,6 +428,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (key->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + wcid_keyidx = &wcid->hw_key_idx2; break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: @@ -443,23 +443,29 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - } + mt7921_mutex_acquire(dev); + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? 
key : NULL); - return mt7921_mcu_add_key(dev, vif, msta, key, cmd); + err = mt7921_mcu_add_key(dev, vif, msta, key, cmd); +out: + mt7921_mutex_release(dev); + + return err; } static int mt7921_config(struct ieee80211_hw *hw, u32 changed) { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - bool band = phy != &dev->phy; int ret; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { @@ -480,9 +486,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) else phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; - mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN, + mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN, enabled); - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter); } mt7921_mutex_release(dev); @@ -511,7 +517,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - bool band = phy != &dev->phy; u32 ctl_flags = MT_WF_RFCR1_DROP_ACK | MT_WF_RFCR1_DROP_BF_POLL | MT_WF_RFCR1_DROP_BA | @@ -551,16 +556,46 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR_DROP_NDPA); *total_flags = flags; - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter); if (*total_flags & FIF_CONTROL) - mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); + mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags); else - mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); + mt76_set(dev, MT_WF_RFCR1(0), ctl_flags); mt7921_mutex_release(dev); } +static int +mt7921_bss_bcnft_apply(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool assoc) +{ + int ret; + + if (!dev->pm.enable) + return 0; + + if (assoc) { + ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true); + if (ret) + return ret; + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); + + return 0; + } + + ret = mt7921_mcu_set_bss_pm(dev, vif, false); + if (ret) + return ret; + + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); + + return 0; +} + static void mt7921_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, @@ -587,6 +622,18 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_PS) mt7921_mcu_uni_bss_ps(dev, vif); + if (changed & BSS_CHANGED_ASSOC) { + mt7921_mcu_sta_add(dev, NULL, vif, true); + mt7921_bss_bcnft_apply(dev, vif, info->assoc); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, + info); + } + mt7921_mutex_release(dev); } @@ -622,8 +669,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, - true, MCU_UNI_CMD_STA_REC_UPDATE); + ret = mt7921_mcu_sta_add(dev, sta, vif, true); if (ret) return ret; @@ -641,17 +687,17 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); - mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, false, - MCU_UNI_CMD_STA_REC_UPDATE); - + mt7921_mcu_sta_add(dev, sta, vif, false); mt7921_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - if 
(vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + if (vif->type == NL80211_IFTYPE_STATION) { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, - false); + ewma_rssi_init(&mvif->rssi); + if (!sta->tdls) + mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, + &mvif->sta.wcid, false); } spin_lock_bh(&dev->sta_poll_lock); @@ -664,23 +710,18 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } -static void -mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +void mt7921_tx_worker(struct mt76_worker *w) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt76_phy *mphy = phy->mt76; - - if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) - return; + struct mt7921_dev *dev = container_of(w, struct mt7921_dev, + mt76.tx_worker); - if (test_bit(MT76_STATE_PM, &mphy->state)) { + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { queue_work(dev->mt76.wq, &dev->pm.wake_work); return; } - dev->pm.last_activity = jiffies; - mt76_worker_schedule(&dev->mt76.tx_worker); + mt76_txq_schedule_all(&dev->mphy); + mt76_connac_pm_unref(&dev->pm); } static void mt7921_tx(struct ieee80211_hw *hw, @@ -708,9 +749,9 @@ static void mt7921_tx(struct ieee80211_hw *hw, wcid = &mvif->sta.wcid; } - if (!test_bit(MT76_STATE_PM, &mphy->state)) { - dev->pm.last_activity = jiffies; + if (mt76_connac_pm_ref(mphy, &dev->pm)) { mt76_tx(mphy, control->sta, wcid, skb); + mt76_connac_pm_unref(&dev->pm); return; } @@ -814,11 +855,17 @@ mt7921_get_stats(struct ieee80211_hw *hw, struct mt7921_phy *phy = mt7921_hw_phy(hw); struct mib_stats *mib = &phy->mib; + mt7921_mutex_acquire(phy->dev); + stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; + memset(mib, 0, sizeof(*mib)); + + mt7921_mutex_release(phy->dev); + return 0; } @@ -827,9 +874,7 @@ mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); u8 omac_idx = mvif->mt76.omac_idx; - bool band = phy != &dev->phy; union { u64 t64; u32 t32[2]; @@ -840,9 +885,9 @@ mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; /* TSF software read */ - mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE); - tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band)); - tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band)); + mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE); + tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0)); + tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0)); mt7921_mutex_release(dev); @@ -855,9 +900,7 @@ mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); u8 omac_idx = mvif->mt76.omac_idx; - bool band = phy != &dev->phy; union { u64 t64; u32 t32[2]; @@ -867,10 +910,10 @@ mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt7921_mutex_acquire(dev); n = omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 : omac_idx; - mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]); - mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]); + mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]); /* TSF software overwrite */ - mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE); + mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE); mt7921_mutex_release(dev); } @@ -1008,14 +1051,6 @@ mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) return 0; } -static void -mt7921_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - u32 changed) -{ -} - static void mt7921_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1120,6 +1155,15 @@ static void mt7921_set_rekey_data(struct ieee80211_hw *hw, } #endif /* CONFIG_PM */ +static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + + wait_event_timeout(dev->mt76.tx_wait, !mt76_has_tx_pending(&dev->mphy), + HZ / 2); +} + const struct ieee80211_ops mt7921_ops = { .tx = mt7921_tx, .start = mt7921_start, @@ -1133,11 +1177,10 @@ const struct ieee80211_ops mt7921_ops = { .sta_add = mt7921_sta_add, .sta_remove = mt7921_sta_remove, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, - .sta_rc_update = mt7921_sta_rc_update, .set_key = mt7921_set_key, .ampdu_action = mt7921_ampdu_action, .set_rts_threshold = mt7921_set_rts_threshold, - .wake_tx_queue = mt7921_wake_tx_queue, + .wake_tx_queue = mt76_wake_tx_queue, .release_buffered_frames = mt76_release_buffered_frames, .get_txpower = mt76_get_txpower, .get_stats = mt7921_get_stats, @@ -1158,4 +1201,5 @@ const struct ieee80211_ops mt7921_ops = { .set_wakeup = mt7921_set_wakeup, .set_rekey_data = mt7921_set_rekey_data, #endif /* CONFIG_PM */ + .flush = mt7921_flush, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index b5cc72e7e81c..5f3d56d570a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -4,6 +4,7 @@ #include <linux/firmware.h> #include <linux/fs.h> #include "mt7921.h" +#include "mt7921_trace.h" #include "mcu.h" #include "mac.h" @@ -159,8 +160,10 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, int ret = 0; if (!skb) { - dev_err(mdev->dev, "Message %d (seq %d) timeout\n", + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", cmd, seq); + mt7921_reset(mdev); + return -ETIMEDOUT; } @@ -222,8 +225,16 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, u32 val; u8 seq; - /* TODO: make dynamic based on msg type */ - mdev->mcu.timeout = 20 * HZ; + switch (cmd) { + case MCU_UNI_CMD_HIF_CTRL: + case MCU_UNI_CMD_SUSPEND: + case MCU_UNI_CMD_OFFLOAD: + mdev->mcu.timeout = HZ / 3; + break; + default: + mdev->mcu.timeout = 3 * HZ; + break; + } seq = ++dev->mt76.mcu.msg_seq & 0xf; if (!seq) @@ -404,9 +415,12 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, if (wlan_idx >= MT76_N_WCIDS) return; + + rcu_read_lock(); + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); if (!wcid) - return; + goto out; msta = container_of(wcid, struct mt7921_sta, wcid); stats = &msta->stats; @@ -414,6 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, /* current rate */ mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr); stats->tx_rate = rate; +out: + rcu_read_unlock(); } static void @@ -466,33 +482,45 @@ 
mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb) static void mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb) { - struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; - struct debug_msg { + struct mt7921_debug_msg { __le16 id; u8 type; u8 flag; __le32 value; __le16 len; u8 content[512]; - } __packed * debug_msg; - u16 cur_len; - int i; - - skb_pull(skb, sizeof(*rxd)); - debug_msg = (struct debug_msg *)skb->data; + } __packed * msg; - cur_len = min_t(u16, le16_to_cpu(debug_msg->len), 512); + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + msg = (struct mt7921_debug_msg *)skb->data; - if (debug_msg->type == 0x3) { - for (i = 0 ; i < cur_len; i++) - if (!debug_msg->content[i]) - debug_msg->content[i] = ' '; + if (msg->type == 3) { /* fw log */ + u16 len = min_t(u16, le16_to_cpu(msg->len), 512); + int i; - dev_dbg(dev->mt76.dev, "%s", debug_msg->content); + for (i = 0 ; i < len; i++) { + if (!msg->content[i]) + msg->content[i] = ' '; + } + wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content); } } static void +mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_lp_event { + u8 state; + u8 reserved[3]; + } __packed * event; + + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + event = (struct mt7921_mcu_lp_event *)skb->data; + + trace_lp_event(dev, event->state); +} + +static void mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) { struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; @@ -515,6 +543,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) mt76_connac_mcu_coredump_event(&dev->mt76, skb, &dev->coredump); return; + case MCU_EVENT_LP_INFO: + mt7921_mcu_low_power_event(dev, skb); + break; default: break; } @@ -537,6 +568,7 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb) rxd->eid == MCU_EVENT_SCAN_DONE || rxd->eid == MCU_EVENT_DBG_MSG || rxd->eid == MCU_EVENT_COREDUMP || + rxd->eid == MCU_EVENT_LP_INFO || !rxd->seq) mt7921_mcu_rx_unsolicited_event(dev, skb); else @@ -919,6 +951,24 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl) sizeof(data), false); } +int mt7921_run_firmware(struct mt7921_dev *dev) +{ + int err; + + err = mt7921_driver_own(dev); + if (err) + return err; + + err = mt7921_load_firmware(dev); + if (err) + return err; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + mt7921_mcu_fw_log_2_host(dev, 1); + + return 0; +} + int mt7921_mcu_init(struct mt7921_dev *dev) { static const struct mt76_mcu_ops mt7921_mcu_ops = { @@ -927,38 +977,15 @@ int mt7921_mcu_init(struct mt7921_dev *dev) .mcu_parse_response = mt7921_mcu_parse_response, .mcu_restart = mt7921_mcu_restart, }; - int ret; dev->mt76.mcu_ops = &mt7921_mcu_ops; - ret = mt7921_driver_own(dev); - if (ret) - return ret; - - ret = mt7921_load_firmware(dev); - if (ret) - return ret; - - set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); - mt7921_mcu_fw_log_2_host(dev, 1); - - return 0; + return mt7921_run_firmware(dev); } void mt7921_mcu_exit(struct mt7921_dev *dev) { - u32 reg = mt7921_reg_map_l1(dev, MT_TOP_MISC); - - __mt76_mcu_restart(&dev->mt76); - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, - FIELD_PREP(MT_TOP_MISC_FW_STATE, - FW_STATE_FW_DOWNLOAD), 1000)) { - dev_err(dev->mt76.dev, "Failed to exit mcu\n"); - return; - } - - reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN); + mt7921_wfsys_reset(dev); skb_queue_purge(&dev->mt76.mcu.res_q); } @@ -1238,12 
+1265,35 @@ int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, sizeof(req), false); } +int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, bool enable) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + int rssi = -ewma_rssi_read(&mvif->rssi); + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .enable = enable, + .cmd = MCU_UNI_CMD_STA_REC_UPDATE, + .rcpi = to_rcpi(rssi), + }; + struct mt7921_sta *msta; + + msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL; + info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; + + return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info); +} + int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; - int i; + struct mt76_connac_pm *pm = &dev->pm; + int i, err = 0; + + mutex_lock(&pm->mutex); - if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) + if (!test_bit(MT76_STATE_PM, &mphy->state)) goto out; for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { @@ -1255,22 +1305,35 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) if (i == MT7921_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "driver own failed\n"); - return -EIO; + err = -EIO; + goto out; } + mt7921_wpdma_reinit_cond(dev); + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; out: - dev->pm.last_activity = jiffies; + mutex_unlock(&pm->mutex); - return 0; + if (err) + mt7921_reset(&dev->mt76); + + return err; } int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; - int i; + struct mt76_connac_pm *pm = &dev->pm; + int i, err = 0; - if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) - return 0; + mutex_lock(&pm->mutex); + + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) + goto out; for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); @@ -1281,10 +1344,20 @@ int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) if (i == MT7921_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "firmware own failed\n"); - return -EIO; + clear_bit(MT76_STATE_PM, &mphy->state); + err = -EIO; } - return 0; + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; +out: + mutex_unlock(&pm->mutex); + + if (err) + mt7921_reset(&dev->mt76); + + return err; } void @@ -1292,8 +1365,14 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt7921_phy *phy = priv; struct mt7921_dev *dev = phy->dev; + int ret; - if (mt7921_mcu_set_bss_pm(dev, vif, dev->pm.enable)) + if (dev->pm.enable) + ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true); + else + ret = mt7921_mcu_set_bss_pm(dev, vif, false); + + if (ret) return; if (dev->pm.enable) { @@ -1304,3 +1383,26 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); } } + +int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr) +{ + struct mt7921_txpwr_event *event; + struct mt7921_txpwr_req req = { + .dbdc_idx = 0, + }; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR, + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + event = (struct mt7921_txpwr_event *)skb->data; + WARN_ON(skb->len != le16_to_cpu(event->len)); + memcpy(txpwr, &event->txpwr, sizeof(event->txpwr)); + + dev_kfree_skb(skb); + + return 0; +} 
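The mcu.c hunk above makes the driver/firmware ownership handshake (mt7921_mcu_drv_pmctrl() and mt7921_mcu_fw_pmctrl()) mutex-protected and adds awake/doze time accounting to the connac power-management stats. The sketch below isolates just that bookkeeping, assuming only the pm->stats fields used in the hunk (last_wake_event, last_doze_event, awake_time, doze_time); the pm_account_*() helper names are illustrative and do not exist in the driver.

#include <linux/jiffies.h>
/* struct mt76_connac_pm and its stats fields are declared in mt76_connac.h */

/* wake transition: the interval since the last doze event counts as doze time */
static void pm_account_wake(struct mt76_connac_pm *pm)
{
	pm->stats.last_wake_event = jiffies;
	pm->stats.doze_time += pm->stats.last_wake_event -
			       pm->stats.last_doze_event;
}

/* doze transition: the interval since the last wake event counts as awake time */
static void pm_account_doze(struct mt76_connac_pm *pm)
{
	pm->stats.last_doze_event = jiffies;
	pm->stats.awake_time += pm->stats.last_doze_event -
				pm->stats.last_wake_event;
}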
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index 2fdc62367b3f..49823d0a3d0a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -79,12 +79,14 @@ struct mt7921_uni_txd { /* event table */ enum { MCU_EVENT_REG_ACCESS = 0x05, + MCU_EVENT_LP_INFO = 0x07, MCU_EVENT_SCAN_DONE = 0x0d, MCU_EVENT_BSS_ABSENCE = 0x11, MCU_EVENT_BSS_BEACON_LOSS = 0x13, MCU_EVENT_CH_PRIVILEGE = 0x18, MCU_EVENT_SCHED_SCAN_DONE = 0x23, MCU_EVENT_DBG_MSG = 0x27, + MCU_EVENT_TXPWR = 0xd0, MCU_EVENT_COREDUMP = 0xf0, }; @@ -177,25 +179,6 @@ enum { MCU_PHY_STATE_OFDMLQ_CNINFO, }; -#define STA_TYPE_STA BIT(0) -#define STA_TYPE_AP BIT(1) -#define STA_TYPE_ADHOC BIT(2) -#define STA_TYPE_WDS BIT(4) -#define STA_TYPE_BC BIT(5) - -#define NETWORK_INFRA BIT(16) -#define NETWORK_P2P BIT(17) -#define NETWORK_IBSS BIT(18) -#define NETWORK_WDS BIT(21) - -#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA) -#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA) -#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P) -#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P) -#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS) -#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS) -#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA) - struct sec_key { u8 cipher_id; u8 cipher_len; @@ -251,29 +234,6 @@ enum { MT_IBF = BIT(1) /* implicit beamforming */ }; -#define MT7921_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_generic) + \ - sizeof(struct wtbl_rx) + \ - sizeof(struct wtbl_ht) + \ - sizeof(struct wtbl_vht) + \ - sizeof(struct wtbl_hdr_trans) +\ - sizeof(struct wtbl_ba) + \ - sizeof(struct wtbl_smps)) - -#define MT7921_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct sta_rec_basic) + \ - sizeof(struct sta_rec_ht) + \ - sizeof(struct sta_rec_he) + \ - sizeof(struct sta_rec_ba) + \ - sizeof(struct sta_rec_vht) + \ - sizeof(struct sta_rec_uapsd) + \ - sizeof(struct sta_rec_amsdu) + \ - sizeof(struct tlv) + \ - MT7921_WTBL_UPDATE_MAX_SIZE) - -#define MT7921_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - #define STA_CAP_WMM BIT(0) #define STA_CAP_SGI_20 BIT(4) #define STA_CAP_SGI_40 BIT(5) @@ -431,4 +391,20 @@ struct mt7921_mcu_wlan_info { __le32 wlan_idx; struct mt7921_mcu_wlan_info_event event; } __packed; + +struct mt7921_txpwr_req { + u8 ver; + u8 action; + __le16 len; + u8 dbdc_idx; + u8 rsv[3]; +} __packed; + +struct mt7921_txpwr_event { + u8 ver; + u8 action; + __le16 len; + struct mt7921_txpwr txpwr; +} __packed; + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 46e6aeec35ae..59862ea4951c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -18,7 +18,7 @@ #define MT7921_PM_TIMEOUT (HZ / 12) #define MT7921_HW_SCAN_TIMEOUT (HZ / 10) -#define MT7921_WATCHDOG_TIME (HZ / 10) +#define MT7921_WATCHDOG_TIME (HZ / 4) #define MT7921_RESET_TIMEOUT (30 * HZ) #define MT7921_TX_RING_SIZE 2048 @@ -35,7 +35,6 @@ #define MT7921_EEPROM_SIZE 3584 #define MT7921_TOKEN_SIZE 8192 -#define MT7921_TOKEN_FREE_THR 64 #define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ @@ -46,6 +45,9 @@ #define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM #define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1) +#define to_rssi(field, rxv) 
((FIELD_GET(field, rxv) - 220) / 2) +#define to_rcpi(rssi) (2 * (rssi) + 220) + struct mt7921_vif; struct mt7921_sta; @@ -92,21 +94,25 @@ struct mt7921_sta { struct mt7921_sta_key_conf bip; }; +DECLARE_EWMA(rssi, 10, 8); + struct mt7921_vif { struct mt76_vif mt76; /* must be first */ struct mt7921_sta sta; struct mt7921_phy *phy; + struct ewma_rssi rssi; + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; }; struct mib_stats { - u16 ack_fail_cnt; - u16 fcs_err_cnt; - u16 rts_cnt; - u16 rts_retries_cnt; - u16 ba_miss_cnt; + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; }; struct mt7921_phy { @@ -125,7 +131,7 @@ struct mt7921_phy { s16 coverage_class; u8 slottime; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; struct mib_stats mib; @@ -149,18 +155,11 @@ struct mt7921_dev { u16 chainmask; - struct work_struct init_work; struct work_struct reset_work; - wait_queue_head_t reset_wait; - u32 reset_state; struct list_head sta_poll_list; spinlock_t sta_poll_lock; - spinlock_t token_lock; - int token_count; - struct idr token; - u8 fw_debug; struct mt76_connac_pm pm; @@ -168,6 +167,36 @@ struct mt7921_dev { }; enum { + TXPWR_USER, + TXPWR_EEPROM, + TXPWR_MAC, + TXPWR_MAX_NUM, +}; + +struct mt7921_txpwr { + u8 ch; + u8 rsv[3]; + struct { + u8 ch; + u8 cck[4]; + u8 ofdm[8]; + u8 ht20[8]; + u8 ht40[9]; + u8 vht20[12]; + u8 vht40[12]; + u8 vht80[12]; + u8 vht160[12]; + u8 he26[12]; + u8 he52[12]; + u8 he106[12]; + u8 he242[12]; + u8 he484[12]; + u8 he996[12]; + u8 he996x2[12]; + } data[TXPWR_MAX_NUM]; +}; + +enum { MT_LMAC_AC00, MT_LMAC_AC01, MT_LMAC_AC02, @@ -209,6 +238,7 @@ extern struct pci_driver mt7921_pci_driver; u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr); +int __mt7921_start(struct mt7921_phy *phy); int mt7921_register_device(struct mt7921_dev *dev); void mt7921_unregister_device(struct mt7921_dev *dev); int mt7921_eeprom_init(struct mt7921_dev *dev); @@ -218,15 +248,17 @@ int mt7921_eeprom_get_target_power(struct mt7921_dev *dev, u8 chain_idx); void mt7921_eeprom_init_sku(struct mt7921_dev *dev); int mt7921_dma_init(struct mt7921_dev *dev); -void mt7921_dma_prefetch(struct mt7921_dev *dev); +int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force); +int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev); void mt7921_dma_cleanup(struct mt7921_dev *dev); +int mt7921_run_firmware(struct mt7921_dev *dev); int mt7921_mcu_init(struct mt7921_dev *dev); -int mt7921_mcu_add_bss_info(struct mt7921_phy *phy, - struct ieee80211_vif *vif, int enable); int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif, struct mt7921_sta *msta, struct ieee80211_key_conf *key, enum set_key_cmd cmd); int mt7921_set_channel(struct mt7921_phy *phy); +int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, bool enable); int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd); int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif); int mt7921_mcu_set_eeprom(struct mt7921_dev *dev); @@ -281,6 +313,12 @@ mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val) #define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val) #define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0) +static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev) +{ + return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); +} + +void mt7921_mac_init(struct mt7921_dev *dev); bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, 
u32 mask); void mt7921_mac_reset_counters(struct mt7921_phy *phy); void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, @@ -296,10 +334,14 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_work(struct work_struct *work); void mt7921_mac_reset_work(struct work_struct *work); +void mt7921_reset(struct mt76_dev *mdev); +void mt7921_tx_cleanup(struct mt7921_dev *dev); int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); + +void mt7921_tx_worker(struct mt76_worker *w); void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc); void mt7921_tx_token_put(struct mt7921_dev *dev); @@ -326,9 +368,6 @@ int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, bool enable); -int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info); int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev); void mt7921_pm_wake_work(struct work_struct *work); @@ -339,4 +378,6 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy, bool enable); void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); void mt7921_coredump_work(struct work_struct *work); +int mt7921_wfsys_reset(struct mt7921_dev *dev); +int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h new file mode 100644 index 000000000000..9bc4db67f352 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#if !defined(__MT7921_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT7921_TRACE_H + +#include <linux/tracepoint.h> +#include "mt7921.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt7921 + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name +#define LP_STATE_PR_ARG __entry->lp_state ? "lp ready" : "lp not ready" + +TRACE_EVENT(lp_event, + TP_PROTO(struct mt7921_dev *dev, u8 lp_state), + + TP_ARGS(dev, lp_state), + + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, lp_state) + ), + + TP_fast_assign( + DEV_ASSIGN; + __entry->lp_state = lp_state; + ), + + TP_printk( + DEV_PR_FMT " %s", + DEV_PR_ARG, LP_STATE_PR_ARG + ) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE mt7921_trace + +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 5570b4a50531..fa02d934f0bf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -13,7 +13,7 @@ #include "../trace.h" static const struct pci_device_id mt7921_pci_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7961) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) }, { }, }; @@ -61,6 +61,18 @@ static void mt7921_irq_tasklet(unsigned long data) if (intr & MT_INT_TX_DONE_MCU) mask |= MT_INT_TX_DONE_MCU; + if (intr & MT_INT_MCU_CMD) { + u32 intr_sw; + + intr_sw = mt76_rr(dev, MT_MCU_CMD); + /* ack MCU2HOST_SW_INT_STA */ + mt76_wr(dev, MT_MCU_CMD, intr_sw); + if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) { + mask |= MT_INT_RX_DONE_DATA; + intr |= MT_INT_RX_DONE_DATA; + } + } + mt76_set_irq_mask(&dev->mt76, MT_WFDMA0_HOST_INT_ENA, mask, 0); if (intr & MT_INT_TX_DONE_ALL) @@ -87,6 +99,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, + .token_size = MT7921_TOKEN_SIZE, .tx_prepare_skb = mt7921_tx_prepare_skb, .tx_complete_skb = mt7921_tx_complete_skb, .rx_skb = mt7921_queue_rx_skb, @@ -137,7 +150,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev, mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); @@ -146,10 +159,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev, ret = mt7921_register_device(dev); if (ret) - goto err_free_dev; + goto err_free_irq; return 0; +err_free_irq: + devm_free_irq(&pdev->dev, pdev->irq, dev); err_free_dev: mt76_free_device(&dev->mt76); err_free_pci_vec: @@ -187,13 +202,15 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state) return err; } + if (!dev->pm.enable) + mt76_connac_mcu_set_deep_sleep(&dev->mt76, true); + napi_disable(&mdev->tx_napi); mt76_worker_disable(&mdev->tx_worker); mt76_for_each_q_rx(mdev, i) { napi_disable(&mdev->napi[i]); } - tasklet_kill(&dev->irq_tasklet); pci_enable_wake(pdev, pci_choose_state(pdev, state), true); @@ -208,13 +225,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state) /* disable interrupt */ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + synchronize_irq(pdev->irq); + tasklet_kill(&dev->irq_tasklet); - pci_save_state(pdev); - err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + err = mt7921_mcu_fw_pmctrl(dev); if (err) goto restore; - err = mt7921_mcu_drv_pmctrl(dev); + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); if (err) goto restore; @@ -225,6 +245,10 @@ restore: napi_enable(&mdev->napi[i]); } napi_enable(&mdev->tx_napi); + + if (!dev->pm.enable) + mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); + if (hif_suspend) mt76_connac_mcu_set_hif_suspend(mdev, false); @@ -237,20 +261,23 @@ static int mt7921_pci_resume(struct pci_dev *pdev) struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); int i, err; - err = mt7921_mcu_fw_pmctrl(dev); - if (err < 0) - return err; - err = pci_set_power_state(pdev, PCI_D0); if (err) return err; pci_restore_state(pdev); + err = mt7921_mcu_drv_pmctrl(dev); + if (err < 0) + return err; + + 
mt7921_wpdma_reinit_cond(dev); + /* enable interrupt */ - mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | MT_INT_MCU_CMD); + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); /* put dma enabled */ mt76_set(dev, MT_WFDMA0_GLO_CFG, @@ -264,6 +291,9 @@ static int mt7921_pci_resume(struct pci_dev *pdev) napi_enable(&mdev->tx_napi); napi_schedule(&mdev->tx_napi); + if (!dev->pm.enable) + mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); + if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state)) err = mt76_connac_mcu_set_hif_suspend(mdev, false); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index 6dad7f6ab09d..b6944c867a57 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -96,8 +96,8 @@ #define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800) #define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) -#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014) -#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0) +#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698) +#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16) #define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) #define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) @@ -121,16 +121,21 @@ #define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) #define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4)) -#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16) +#define MT_MIB_MB_BSDR0(_band) MT_WF_MIB(_band, 0x688) +#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR1(_band) MT_WF_MIB(_band, 0x690) +#define MT_MIB_RTS_FAIL_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR2(_band) MT_WF_MIB(_band, 0x518) +#define MT_MIB_BA_FAIL_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR3(_band) MT_WF_MIB(_band, 0x520) +#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(15, 0) #define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4)) #define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0) -#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2)) -#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2)) -#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2)) +#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x7dc + ((n) << 2)) +#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x7ec + ((n) << 2)) +#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2)) #define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0)) #define MT_WTBLON_TOP_BASE 0x34000 @@ -246,13 +251,16 @@ #define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1) #define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2) -#define MT_MCU_CMD MT_WFDMA0(0x1f0) -#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) -#define MT_MCU_CMD_STOP_DMA BIT(2) -#define MT_MCU_CMD_RESET_DONE BIT(3) -#define MT_MCU_CMD_RECOVERY_DONE BIT(4) -#define MT_MCU_CMD_NORMAL_STATE BIT(5) -#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) +#define MT_MCU_CMD MT_WFDMA0(0x1f0) +#define MT_MCU_CMD_WAKE_RX_PCIE BIT(0) +#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) +#define MT_MCU_CMD_STOP_DMA BIT(2) +#define MT_MCU_CMD_RESET_DONE BIT(3) +#define MT_MCU_CMD_RECOVERY_DONE BIT(4) +#define MT_MCU_CMD_NORMAL_STATE BIT(5) +#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) + +#define MT_MCU2HOST_SW_INT_ENA MT_WFDMA0(0x1f4) #define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200) 
#define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */ @@ -357,11 +365,11 @@ #define MT_INFRA_CFG_BASE 0xfe000 #define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) -#define MT_HIF_REMAP_L1 MT_INFRA(0x260) +#define MT_HIF_REMAP_L1 MT_INFRA(0x24c) #define MT_HIF_REMAP_L1_MASK GENMASK(15, 0) #define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0) #define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) -#define MT_HIF_REMAP_BASE_L1 0xe0000 +#define MT_HIF_REMAP_BASE_L1 0x40000 #define MT_SWDEF_BASE 0x41f200 #define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) @@ -380,11 +388,17 @@ #define MT_TOP_MISC MT_TOP(0xf0) #define MT_TOP_MISC_FW_STATE GENMASK(2, 0) +#define MT_MCU_WPDMA0_BASE 0x54000000 +#define MT_MCU_WPDMA0(ofs) (MT_MCU_WPDMA0_BASE + (ofs)) + +#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120) +#define MT_WFDMA_NEED_REINIT BIT(1) + #define MT_HW_BOUND 0x70010020 #define MT_HW_CHIPID 0x70010200 #define MT_HW_REV 0x70010204 -#define MT_PCIE_MAC_BASE 0x74030000 +#define MT_PCIE_MAC_BASE 0x10000 #define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) #define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188) @@ -413,6 +427,10 @@ #define PCIE_LPCR_HOST_CLR_OWN BIT(1) #define PCIE_LPCR_HOST_SET_OWN BIT(0) +#define MT_WFSYS_SW_RST_B 0x18000140 +#define WFSYS_SW_RST_B BIT(0) +#define WFSYS_SW_INIT_DONE BIT(4) + #define MT_CONN_ON_MISC 0x7c0600f0 #define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/trace.c b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c new file mode 100644 index 000000000000..4dc3c7b89ebd --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/module.h> + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "mt7921_trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 0b6facb17ff7..a18d2896ee1f 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, q->entry[q->head].skb = tx_info.skb; q->entry[q->head].buf_sz = len; + + smp_wmb(); + q->head = (q->head + 1) % q->ndesc; q->queued++; diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c index cc769645afa5..001d0ba5f73e 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -62,36 +62,83 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy) spin_unlock_bh(&q->lock); } +static u32 +mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode) +{ + switch (tx_rate_mode) { + case MT76_TM_TX_MODE_HT: + return IEEE80211_MAX_MPDU_LEN_HT_7935; + case MT76_TM_TX_MODE_VHT: + case MT76_TM_TX_MODE_HE_SU: + case MT76_TM_TX_MODE_HE_EXT_SU: + case MT76_TM_TX_MODE_HE_TB: + case MT76_TM_TX_MODE_HE_MU: + if (phy->sband_5g.sband.vht_cap.cap & + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991) + return IEEE80211_MAX_MPDU_LEN_VHT_7991; + return IEEE80211_MAX_MPDU_LEN_VHT_11454; + case MT76_TM_TX_MODE_CCK: + case MT76_TM_TX_MODE_OFDM: + default: + return IEEE80211_MAX_FRAME_LEN; + } +} -static int -mt76_testmode_tx_init(struct mt76_phy *phy) +static void +mt76_testmode_free_skb(struct mt76_phy *phy) { struct mt76_testmode_data *td = &phy->test; - struct ieee80211_tx_info *info; - struct ieee80211_hdr *hdr; - struct sk_buff *skb; + struct sk_buff *skb = td->tx_skb; + + if (!skb) + return; + + 
if (skb_has_frag_list(skb)) { + kfree_skb_list(skb_shinfo(skb)->frag_list); + skb_shinfo(skb)->frag_list = NULL; + } + + dev_kfree_skb(skb); + td->tx_skb = NULL; +} + +int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len) +{ +#define MT_TXP_MAX_LEN 4095 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_FROMDS; - struct ieee80211_tx_rate *rate; - u8 max_nss = hweight8(phy->antenna_mask); + struct mt76_testmode_data *td = &phy->test; bool ext_phy = phy != &phy->dev->phy; + struct sk_buff **frag_tail, *head; + struct ieee80211_tx_info *info; + struct ieee80211_hdr *hdr; + u32 max_len, head_len; + int nfrags, i; - if (td->tx_antenna_mask) - max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); + max_len = mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode); + if (len > max_len) + len = max_len; + else if (len < sizeof(struct ieee80211_hdr)) + len = sizeof(struct ieee80211_hdr); - skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL); - if (!skb) + nfrags = len / MT_TXP_MAX_LEN; + head_len = nfrags ? MT_TXP_MAX_LEN : len; + + if (len > IEEE80211_MAX_FRAME_LEN) + fc |= IEEE80211_STYPE_QOS_DATA; + + head = alloc_skb(head_len, GFP_KERNEL); + if (!head) return -ENOMEM; - dev_kfree_skb(td->tx_skb); - td->tx_skb = skb; - hdr = __skb_put_zero(skb, td->tx_msdu_len); + hdr = __skb_put_zero(head, head_len); hdr->frame_control = cpu_to_le16(fc); memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr)); memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr)); memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr)); + skb_set_queue_mapping(head, IEEE80211_AC_BE); - info = IEEE80211_SKB_CB(skb); + info = IEEE80211_SKB_CB(head); info->flags = IEEE80211_TX_CTL_INJECTED | IEEE80211_TX_CTL_NO_ACK | IEEE80211_TX_CTL_NO_PS_BUFFER; @@ -99,9 +146,60 @@ mt76_testmode_tx_init(struct mt76_phy *phy) if (ext_phy) info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; + frag_tail = &skb_shinfo(head)->frag_list; + + for (i = 0; i < nfrags; i++) { + struct sk_buff *frag; + u16 frag_len; + + if (i == nfrags - 1) + frag_len = len % MT_TXP_MAX_LEN; + else + frag_len = MT_TXP_MAX_LEN; + + frag = alloc_skb(frag_len, GFP_KERNEL); + if (!frag) + return -ENOMEM; + + __skb_put_zero(frag, frag_len); + head->len += frag->len; + head->data_len += frag->len; + + if (*frag_tail) { + (*frag_tail)->next = frag; + frag_tail = &frag; + } else { + *frag_tail = frag; + } + } + + mt76_testmode_free_skb(phy); + td->tx_skb = head; + + return 0; +} +EXPORT_SYMBOL(mt76_testmode_alloc_skb); + +static int +mt76_testmode_tx_init(struct mt76_phy *phy) +{ + struct mt76_testmode_data *td = &phy->test; + struct ieee80211_tx_info *info; + struct ieee80211_tx_rate *rate; + u8 max_nss = hweight8(phy->antenna_mask); + int ret; + + ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len); + if (ret) + return ret; + if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT) goto out; + if (td->tx_antenna_mask) + max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); + + info = IEEE80211_SKB_CB(td->tx_skb); rate = &info->control.rates[0]; rate->count = 1; rate->idx = td->tx_rate_idx; @@ -171,8 +269,6 @@ mt76_testmode_tx_init(struct mt76_phy *phy) } } out: - skb_set_queue_mapping(skb, IEEE80211_AC_BE); - return 0; } @@ -203,8 +299,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy) wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, MT76_TM_TIMEOUT * HZ); - dev_kfree_skb(td->tx_skb); - td->tx_skb = NULL; + mt76_testmode_free_skb(phy); } static inline void @@ -224,10 +319,10 @@ mt76_testmode_init_defaults(struct mt76_phy *phy) { struct mt76_testmode_data *td = 
&phy->test; - if (td->tx_msdu_len > 0) + if (td->tx_mpdu_len > 0) return; - td->tx_msdu_len = 1024; + td->tx_mpdu_len = 1024; td->tx_count = 1; td->tx_rate_mode = MT76_TM_TX_MODE_OFDM; td->tx_rate_nss = 1; @@ -345,16 +440,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (tb[MT76_TM_ATTR_TX_COUNT]) td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]); - if (tb[MT76_TM_ATTR_TX_LENGTH]) { - u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]); - - if (val > IEEE80211_MAX_FRAME_LEN || - val < sizeof(struct ieee80211_hdr)) - goto out; - - td->tx_msdu_len = val; - } - if (tb[MT76_TM_ATTR_TX_RATE_IDX]) td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]); @@ -375,6 +460,16 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, &td->tx_power_control, 0, 1)) goto out; + if (tb[MT76_TM_ATTR_TX_LENGTH]) { + u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]); + + if (val > mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode) || + val < sizeof(struct ieee80211_hdr)) + goto out; + + td->tx_mpdu_len = val; + } + if (tb[MT76_TM_ATTR_TX_IPG]) td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]); @@ -506,7 +601,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, goto out; if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) || - nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) || + nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) || nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) || nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) || nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) || diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h index e0c706ce9b42..d32a7654c47e 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.h +++ b/drivers/net/wireless/mediatek/mt76/testmode.h @@ -21,7 +21,7 @@ * @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting * state to MT76_TM_STATE_TX_FRAMES (u32) * @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32) - * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32) + * @MT76_TM_ATTR_TX_LENGTH: packet tx mpdu length (u32) * @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode) * @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8) * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8) diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index b8fe8adc43a3..53ea8de82df0 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -213,7 +213,7 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk if (phy->test.tx_queued == phy->test.tx_done) wake_up(&dev->tx_wait); - ieee80211_free_txskb(hw, skb); + dev_kfree_skb_any(skb); return; } #endif @@ -422,8 +422,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, return idx; do { - if (test_bit(MT76_STATE_PM, &phy->state) || - test_bit(MT76_RESET, &phy->state)) + if (test_bit(MT76_RESET, &phy->state)) return -EBUSY; if (stop || mt76_txq_stopped(q)) @@ -461,11 +460,10 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) int ret = 0; while (1) { - if (test_bit(MT76_STATE_PM, &phy->state) || - test_bit(MT76_RESET, &phy->state)) { - ret = -EBUSY; - break; - } + int n_frames = 0; + + if (test_bit(MT76_RESET, &phy->state)) + return -EBUSY; if (dev->queue_ops->tx_cleanup && q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) { @@ 
-497,11 +495,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) } if (!mt76_txq_stopped(q)) - ret += mt76_txq_send_burst(phy, q, mtxq); + n_frames = mt76_txq_send_burst(phy, q, mtxq); spin_unlock_bh(&q->lock); ieee80211_return_txq(phy->hw, txq, false); + + if (unlikely(n_frames < 0)) + return n_frames; + + ret += n_frames; } return ret; @@ -535,10 +538,8 @@ void mt76_txq_schedule_all(struct mt76_phy *phy) } EXPORT_SYMBOL_GPL(mt76_txq_schedule_all); -void mt76_tx_worker(struct mt76_worker *w) +void mt76_tx_worker_run(struct mt76_dev *dev) { - struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker); - mt76_txq_schedule_all(&dev->phy); if (dev->phy2) mt76_txq_schedule_all(dev->phy2); @@ -550,6 +551,14 @@ void mt76_tx_worker(struct mt76_worker *w) mt76_testmode_tx_pending(dev->phy2); #endif } +EXPORT_SYMBOL_GPL(mt76_tx_worker_run); + +void mt76_tx_worker(struct mt76_worker *w) +{ + struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker); + + mt76_tx_worker_run(dev); +} void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta, bool send_bar) @@ -639,3 +648,64 @@ void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q, spin_unlock_bh(&q->lock); } EXPORT_SYMBOL_GPL(mt76_queue_tx_complete); + +void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) +{ + struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2; + struct mt76_queue *q, *q2 = NULL; + + q = phy->q_tx[0]; + if (blocked == q->blocked) + return; + + q->blocked = blocked; + if (phy2) { + q2 = phy2->q_tx[0]; + q2->blocked = blocked; + } + + if (!blocked) + mt76_worker_schedule(&dev->tx_worker); +} +EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked); + +int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) +{ + int token; + + spin_lock_bh(&dev->token_lock); + + token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size, + GFP_ATOMIC); + if (token >= 0) + dev->token_count++; + + if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR) + __mt76_set_tx_blocked(dev, true); + + spin_unlock_bh(&dev->token_lock); + + return token; +} +EXPORT_SYMBOL_GPL(mt76_token_consume); + +struct mt76_txwi_cache * +mt76_token_release(struct mt76_dev *dev, int token, bool *wake) +{ + struct mt76_txwi_cache *txwi; + + spin_lock_bh(&dev->token_lock); + + txwi = idr_remove(&dev->token, token); + if (txwi) + dev->token_count--; + + if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR && + dev->phy.q_tx[0]->blocked) + *wake = true; + + spin_unlock_bh(&dev->token_lock); + + return txwi; +} +EXPORT_SYMBOL_GPL(mt76_token_release); diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c index c868582c5d22..aa3b64902cf9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) { u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); - return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); + return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); } static void diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index cada48800928..5d9e952b2966 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -610,6 +610,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev) wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->interface_modes = 
BIT(NL80211_IFTYPE_STATION); + wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); diff --git a/drivers/net/wireless/microchip/wilc1000/Kconfig b/drivers/net/wireless/microchip/wilc1000/Kconfig index 7f15e42602dd..62cfcdc9aacc 100644 --- a/drivers/net/wireless/microchip/wilc1000/Kconfig +++ b/drivers/net/wireless/microchip/wilc1000/Kconfig @@ -27,6 +27,7 @@ config WILC1000_SPI depends on CFG80211 && INET && SPI select WILC1000 select CRC7 + select CRC_ITU_T help This module adds support for the SPI interface of adapters using WILC1000 chipset. The Atmel WILC1000 has a Serial Peripheral diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 1b205e7d97a8..7e4d9235251c 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -24,12 +24,10 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data) { - struct net_device *dev = user_data; - struct wilc_vif *vif = netdev_priv(dev); - struct wilc *wilc = vif->wilc; + struct wilc *wilc = user_data; if (wilc->close) { - netdev_err(dev, "Can't handle UH interrupt\n"); + pr_err("Can't handle UH interrupt"); return IRQ_HANDLED; } return IRQ_WAKE_THREAD; @@ -37,12 +35,10 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data) static irqreturn_t isr_bh_routine(int irq, void *userdata) { - struct net_device *dev = userdata; - struct wilc_vif *vif = netdev_priv(userdata); - struct wilc *wilc = vif->wilc; + struct wilc *wilc = userdata; if (wilc->close) { - netdev_err(dev, "Can't handle BH interrupt\n"); + pr_err("Can't handle BH interrupt\n"); return IRQ_HANDLED; } @@ -60,7 +56,7 @@ static int init_irq(struct net_device *dev) ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine, isr_bh_routine, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "WILC_IRQ", dev); + "WILC_IRQ", wl); if (ret) { netdev_err(dev, "Failed to request IRQ [%d]\n", ret); return ret; @@ -575,7 +571,6 @@ static int wilc_mac_open(struct net_device *ndev) { struct wilc_vif *vif = netdev_priv(ndev); struct wilc *wl = vif->wilc; - unsigned char mac_add[ETH_ALEN] = {0}; int ret = 0; struct mgmt_frame_regs mgmt_regs = {}; @@ -598,9 +593,12 @@ static int wilc_mac_open(struct net_device *ndev) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype, vif->idx); - wilc_get_mac_address(vif, mac_add); - netdev_dbg(ndev, "Mac address: %pM\n", mac_add); - ether_addr_copy(ndev->dev_addr, mac_add); + + if (is_valid_ether_addr(ndev->dev_addr)) + wilc_set_mac_address(vif, ndev->dev_addr); + else + wilc_get_mac_address(vif, ndev->dev_addr); + netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { netdev_err(ndev, "Wrong MAC address\n"); @@ -639,7 +637,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) int srcu_idx; if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; + return -EADDRNOTAVAIL; + + if (!vif->mac_opened) { + eth_commit_mac_addr_change(dev, p); + return 0; + } + + /* Verify MAC Address is not already in use: */ srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) { @@ -647,7 +652,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) if (ether_addr_equal(addr->sa_data, mac_addr)) { if (vif != tmp_vif) { srcu_read_unlock(&wilc->srcu, srcu_idx); - return -EINVAL; + return -EADDRNOTAVAIL; } srcu_read_unlock(&wilc->srcu, srcu_idx); return 0; @@ -659,9 +664,7 @@ static int 
wilc_set_mac_addr(struct net_device *dev, void *p) if (result) return result; - ether_addr_copy(vif->bssid, addr->sa_data); - ether_addr_copy(vif->ndev->dev_addr, addr->sa_data); - + eth_commit_mac_addr_change(dev, p); return result; } diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 351ff909ab1c..e14b9fc2c67a 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint) for (i = 0; (i < 3) && (nint > 0); i++, nint--) reg |= BIT(i); - ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, ®); + ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg); if (ret) { dev_err(&func->dev, "Failed write reg (%08x)...\n", diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index be732929322c..1472e9843896 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -7,12 +7,41 @@ #include <linux/clk.h> #include <linux/spi/spi.h> #include <linux/crc7.h> +#include <linux/crc-itu-t.h> #include "netdev.h" #include "cfg80211.h" +static bool enable_crc7; /* protect SPI commands with CRC7 */ +module_param(enable_crc7, bool, 0644); +MODULE_PARM_DESC(enable_crc7, + "Enable CRC7 checksum to protect command transfers\n" + "\t\t\tagainst corruption during the SPI transfer.\n" + "\t\t\tCommand transfers are short and the CPU-cycle cost\n" + "\t\t\tof enabling this is small."); + +static bool enable_crc16; /* protect SPI data with CRC16 */ +module_param(enable_crc16, bool, 0644); +MODULE_PARM_DESC(enable_crc16, + "Enable CRC16 checksum to protect data transfers\n" + "\t\t\tagainst corruption during the SPI transfer.\n" + "\t\t\tData transfers can be large and the CPU-cycle cost\n" + "\t\t\tof enabling this may be substantial."); + +/* + * For CMD_SINGLE_READ and CMD_INTERNAL_READ, WILC may insert one or + * more zero bytes between the command response and the DATA Start tag + * (0xf3). This behavior appears to be undocumented in "ATWILC1000 + * USER GUIDE" (https://tinyurl.com/4hhshdts) but we have observed 1-4 + * zero bytes when the SPI bus operates at 48MHz and none when it + * operates at 1MHz. 
+ */ +#define WILC_SPI_RSP_HDR_EXTRA_DATA 8 + struct wilc_spi { - int crc_off; + bool probing_crc; /* true if we're probing chip's CRC config */ + bool crc7_enabled; /* true if crc7 is currently enabled */ + bool crc16_enabled; /* true if crc16 is currently enabled */ }; static const struct wilc_hif_func wilc_hif_spi; @@ -36,12 +65,36 @@ static const struct wilc_hif_func wilc_hif_spi; #define CMD_RESET 0xcf #define SPI_ENABLE_VMM_RETRY_LIMIT 2 -#define DATA_PKT_SZ_256 256 -#define DATA_PKT_SZ_512 512 -#define DATA_PKT_SZ_1K 1024 -#define DATA_PKT_SZ_4K (4 * 1024) -#define DATA_PKT_SZ_8K (8 * 1024) -#define DATA_PKT_SZ DATA_PKT_SZ_8K + +/* SPI response fields (section 11.1.2 in ATWILC1000 User Guide): */ +#define RSP_START_FIELD GENMASK(7, 4) +#define RSP_TYPE_FIELD GENMASK(3, 0) + +/* SPI response values for the response fields: */ +#define RSP_START_TAG 0xc +#define RSP_TYPE_FIRST_PACKET 0x1 +#define RSP_TYPE_INNER_PACKET 0x2 +#define RSP_TYPE_LAST_PACKET 0x3 +#define RSP_STATE_NO_ERROR 0x00 + +#define PROTOCOL_REG_PKT_SZ_MASK GENMASK(6, 4) +#define PROTOCOL_REG_CRC16_MASK GENMASK(3, 3) +#define PROTOCOL_REG_CRC7_MASK GENMASK(2, 2) + +/* + * The SPI data packet size may be any integer power of two in the + * range from 256 to 8192 bytes. + */ +#define DATA_PKT_LOG_SZ_MIN 8 /* 256 B */ +#define DATA_PKT_LOG_SZ_MAX 13 /* 8 KiB */ + +/* + * Select the data packet size (log2 of number of bytes): Use the + * maximum data packet size. We only retransmit complete packets, so + * there is no benefit from using smaller data packets. + */ +#define DATA_PKT_LOG_SZ DATA_PKT_LOG_SZ_MAX +#define DATA_PKT_SZ (1 << DATA_PKT_LOG_SZ) #define USE_SPI_DMA 0 @@ -79,16 +132,15 @@ struct wilc_spi_cmd { } __packed; struct wilc_spi_read_rsp_data { - u8 rsp_cmd_type; - u8 status; - u8 resp_header; - u8 resp_data[4]; + u8 header; + u8 data[4]; u8 crc[]; } __packed; struct wilc_spi_rsp_data { u8 rsp_cmd_type; u8 status; + u8 data[]; } __packed; static int wilc_bus_probe(struct spi_device *spi) @@ -281,7 +333,8 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) struct wilc_spi *spi_priv = wilc->bus_data; int ix, nbytes; int result = 0; - u8 cmd, order, crc[2] = {0}; + u8 cmd, order, crc[2]; + u16 crc_calc; /* * Data @@ -323,9 +376,12 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) } /* - * Write Crc + * Write CRC */ - if (!spi_priv->crc_off) { + if (spi_priv->crc16_enabled) { + crc_calc = crc_itu_t(0xffff, &b[ix], nbytes); + crc[0] = crc_calc >> 8; + crc[1] = crc_calc; if (wilc_spi_tx(wilc, crc, 2)) { dev_err(&spi->dev, "Failed data block crc write, bus error...\n"); result = -EINVAL; @@ -359,10 +415,11 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u8 wb[32], rb[32]; - int cmd_len, resp_len; - u8 crc[2]; + int cmd_len, resp_len, i; + u16 crc_calc, crc_recv; struct wilc_spi_cmd *c; - struct wilc_spi_read_rsp_data *r; + struct wilc_spi_rsp_data *r; + struct wilc_spi_read_rsp_data *r_data; memset(wb, 0x0, sizeof(wb)); memset(rb, 0x0, sizeof(rb)); @@ -384,8 +441,9 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, } cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc); - resp_len = sizeof(*r); - if (!spi_priv->crc_off) { + resp_len = sizeof(*r) + sizeof(*r_data) + WILC_SPI_RSP_HDR_EXTRA_DATA; + + if (spi_priv->crc7_enabled) { c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); cmd_len += 1; resp_len += 2; @@ -403,11 +461,12 @@ static 
int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, return -EINVAL; } - r = (struct wilc_spi_read_rsp_data *)&rb[cmd_len]; + r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; if (r->rsp_cmd_type != cmd) { - dev_err(&spi->dev, - "Failed cmd response, cmd (%02x), resp (%02x)\n", - cmd, r->rsp_cmd_type); + if (!spi_priv->probing_crc) + dev_err(&spi->dev, + "Failed cmd, cmd (%02x), resp (%02x)\n", + cmd, r->rsp_cmd_type); return -EINVAL; } @@ -417,17 +476,30 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, return -EINVAL; } - if (WILC_GET_RESP_HDR_START(r->resp_header) != 0xf) { - dev_err(&spi->dev, "Error, data read response (%02x)\n", - r->resp_header); + for (i = 0; i < WILC_SPI_RSP_HDR_EXTRA_DATA; ++i) + if (WILC_GET_RESP_HDR_START(r->data[i]) == 0xf) + break; + + if (i >= WILC_SPI_RSP_HDR_EXTRA_DATA) { + dev_err(&spi->dev, "Error, data start missing\n"); return -EINVAL; } - if (b) - memcpy(b, r->resp_data, 4); + r_data = (struct wilc_spi_read_rsp_data *)&r->data[i]; - if (!spi_priv->crc_off) - memcpy(crc, r->crc, 2); + if (b) + memcpy(b, r_data->data, 4); + + if (!clockless && spi_priv->crc16_enabled) { + crc_recv = (r_data->crc[0] << 8) | r_data->crc[1]; + crc_calc = crc_itu_t(0xffff, r_data->data, 4); + if (crc_recv != crc_calc) { + dev_err(&spi->dev, "%s: bad CRC 0x%04x " + "(calculated 0x%04x)\n", __func__, + crc_recv, crc_calc); + return -EINVAL; + } + } return 0; } @@ -454,7 +526,7 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, c->u.internal_w_cmd.addr[1] = adr; c->u.internal_w_cmd.data = cpu_to_be32(data); cmd_len = offsetof(struct wilc_spi_cmd, u.internal_w_cmd.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.internal_w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else if (cmd == CMD_SINGLE_WRITE) { c->u.w_cmd.addr[0] = adr >> 16; @@ -462,14 +534,14 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, c->u.w_cmd.addr[2] = adr; c->u.w_cmd.data = cpu_to_be32(data); cmd_len = offsetof(struct wilc_spi_cmd, u.w_cmd.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else { dev_err(&spi->dev, "write cmd [%x] not supported\n", cmd); return -EINVAL; } - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) cmd_len += 1; resp_len = sizeof(*r); @@ -507,6 +579,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; + u16 crc_recv, crc_calc; u8 wb[32], rb[32]; int cmd_len, resp_len; int retry, ix = 0; @@ -525,7 +598,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) c->u.dma_cmd.size[0] = sz >> 8; c->u.dma_cmd.size[1] = sz; cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.dma_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else if (cmd == CMD_DMA_EXT_WRITE || cmd == CMD_DMA_EXT_READ) { c->u.dma_cmd_ext.addr[0] = adr >> 16; @@ -535,14 +608,14 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) c->u.dma_cmd_ext.size[1] = sz >> 8; c->u.dma_cmd_ext.size[2] = sz; cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd_ext.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.dma_cmd_ext.crc[0] = wilc_get_crc7(wb, cmd_len); } else { dev_err(&spi->dev, "dma read write cmd [%x] not supported\n", cmd); return -EINVAL; } - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) 
cmd_len += 1; resp_len = sizeof(*r); @@ -608,12 +681,22 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) } /* - * Read Crc + * Read CRC */ - if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) { - dev_err(&spi->dev, - "Failed block crc read, bus err\n"); - return -EINVAL; + if (spi_priv->crc16_enabled) { + if (wilc_spi_rx(wilc, crc, 2)) { + dev_err(&spi->dev, + "Failed block CRC read, bus err\n"); + return -EINVAL; + } + crc_recv = (crc[0] << 8) | crc[1]; + crc_calc = crc_itu_t(0xffff, &b[ix], nbytes); + if (crc_recv != crc_calc) { + dev_err(&spi->dev, "%s: bad CRC 0x%04x " + "(calculated 0x%04x)\n", __func__, + crc_recv, crc_calc); + return -EINVAL; + } } ix += nbytes; @@ -680,11 +763,13 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat) static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) { struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; int result; result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0); if (result) { - dev_err(&spi->dev, "Failed internal read cmd...\n"); + if (!spi_priv->probing_crc) + dev_err(&spi->dev, "Failed internal read cmd...\n"); return result; } @@ -721,6 +806,52 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data) return 0; } +static int spi_data_rsp(struct wilc *wilc, u8 cmd) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + int result, i; + u8 rsp[4]; + + /* + * The response to data packets is two bytes long. For + * efficiency's sake, wilc_spi_write() wisely ignores the + * responses for all packets but the final one. The downside + * of that optimization is that when the final data packet is + * short, we may receive (part of) the response to the + * second-to-last packet before the one for the final packet. + * To handle this, we always read 4 bytes and then search for + * the last byte that contains the "Response Start" code (0xc + * in the top 4 bits). We then know that this byte is the + * first response byte of the final data packet. + */ + result = wilc_spi_rx(wilc, rsp, sizeof(rsp)); + if (result) { + dev_err(&spi->dev, "Failed bus error...\n"); + return result; + } + + for (i = sizeof(rsp) - 2; i >= 0; --i) + if (FIELD_GET(RSP_START_FIELD, rsp[i]) == RSP_START_TAG) + break; + + if (i < 0) { + dev_err(&spi->dev, + "Data packet response missing (%02x %02x %02x %02x)\n", + rsp[0], rsp[1], rsp[2], rsp[3]); + return -1; + } + + /* rsp[i] is the last response start byte */ + + if (FIELD_GET(RSP_TYPE_FIELD, rsp[i]) != RSP_TYPE_LAST_PACKET + || rsp[i + 1] != RSP_STATE_NO_ERROR) { + dev_err(&spi->dev, "Data response error (%02x %02x)\n", + rsp[i], rsp[i + 1]); + return -1; + } + return 0; +} + static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct spi_device *spi = to_spi_device(wilc->dev); @@ -748,7 +879,10 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) return result; } - return 0; + /* + * Data response + */ + return spi_data_rsp(wilc, CMD_DMA_EXT_WRITE); } /******************************************** @@ -772,7 +906,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) u32 reg; u32 chipid; static int isinit; - int ret; + int ret, i; if (isinit) { ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); @@ -787,42 +921,54 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) */ /* - * TODO: We can remove the CRC trials if there is a definite - * way to reset + * Infer the CRC settings that are currently in effect. 
This + * is necessary because we can't be sure that the chip has + * been RESET (e.g, after module unload and reload). */ - /* the SPI to it's initial value. */ - ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); - if (ret) { - /* - * Read failed. Try with CRC off. This might happen when module - * is removed but chip isn't reset - */ - spi_priv->crc_off = 1; - dev_err(&spi->dev, - "Failed read with CRC on, retrying with CRC off\n"); + spi_priv->probing_crc = true; + spi_priv->crc7_enabled = enable_crc7; + spi_priv->crc16_enabled = false; /* don't check CRC16 during probing */ + for (i = 0; i < 2; ++i) { ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); - if (ret) { - /* - * Read failed with both CRC on and off, - * something went bad - */ - dev_err(&spi->dev, "Failed internal read protocol\n"); - return ret; - } + if (ret == 0) + break; + spi_priv->crc7_enabled = !enable_crc7; } - if (spi_priv->crc_off == 0) { - reg &= ~0xc; /* disable crc checking */ - reg &= ~0x70; - reg |= (0x5 << 4); - ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg); - if (ret) { - dev_err(&spi->dev, - "[wilc spi %d]: Failed internal write reg\n", - __LINE__); - return ret; - } - spi_priv->crc_off = 1; + if (ret) { + dev_err(&spi->dev, "Failed with CRC7 on and off.\n"); + return ret; + } + + /* set up the desired CRC configuration: */ + reg &= ~(PROTOCOL_REG_CRC7_MASK | PROTOCOL_REG_CRC16_MASK); + if (enable_crc7) + reg |= PROTOCOL_REG_CRC7_MASK; + if (enable_crc16) + reg |= PROTOCOL_REG_CRC16_MASK; + + /* set up the data packet size: */ + BUILD_BUG_ON(DATA_PKT_LOG_SZ < DATA_PKT_LOG_SZ_MIN + || DATA_PKT_LOG_SZ > DATA_PKT_LOG_SZ_MAX); + reg &= ~PROTOCOL_REG_PKT_SZ_MASK; + reg |= FIELD_PREP(PROTOCOL_REG_PKT_SZ_MASK, + DATA_PKT_LOG_SZ - DATA_PKT_LOG_SZ_MIN); + + /* establish the new setup: */ + ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg); + if (ret) { + dev_err(&spi->dev, + "[wilc spi %d]: Failed internal write reg\n", + __LINE__); + return ret; } + /* update our state to match new protocol settings: */ + spi_priv->crc7_enabled = enable_crc7; + spi_priv->crc16_enabled = enable_crc16; + + /* re-read to make sure new settings are in effect: */ + spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); + + spi_priv->probing_crc = false; /* * make sure can read back chip id correctly diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 31d51385ba93..2030fc7f53ca 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -552,12 +552,60 @@ static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc) void chip_allow_sleep(struct wilc *wilc) { u32 reg = 0; + const struct wilc_hif_func *hif_func = wilc->hif_func; + u32 wakeup_reg, wakeup_bit; + u32 to_host_from_fw_reg, to_host_from_fw_bit; + u32 from_host_to_fw_reg, from_host_to_fw_bit; + u32 trials = 100; + int ret; + + if (wilc->io_type == WILC_HIF_SDIO) { + wakeup_reg = WILC_SDIO_WAKEUP_REG; + wakeup_bit = WILC_SDIO_WAKEUP_BIT; + from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; + from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; + to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG; + to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT; + } else { + wakeup_reg = WILC_SPI_WAKEUP_REG; + wakeup_bit = WILC_SPI_WAKEUP_BIT; + from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; + from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; + to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG; + to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT; + } + + 
while (--trials) { + ret = hif_func->hif_read_reg(wilc, to_host_from_fw_reg, &reg); + if (ret) + return; + if ((reg & to_host_from_fw_bit) == 0) + break; + } + if (!trials) + pr_warn("FW not responding\n"); - wilc->hif_func->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg); + /* Clear bit 1 */ + ret = hif_func->hif_read_reg(wilc, wakeup_reg, &reg); + if (ret) + return; + if (reg & wakeup_bit) { + reg &= ~wakeup_bit; + ret = hif_func->hif_write_reg(wilc, wakeup_reg, reg); + if (ret) + return; + } - wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, - reg & ~WILC_SDIO_WAKEUP_BIT); - wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, 0); + ret = hif_func->hif_read_reg(wilc, from_host_to_fw_reg, &reg); + if (ret) + return; + if (reg & from_host_to_fw_bit) { + reg &= ~from_host_to_fw_bit; + ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg, reg); + if (ret) + return; + + } } EXPORT_SYMBOL_GPL(chip_allow_sleep); diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h index d55eb6b3a12a..771c25fa849b 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan.h @@ -97,6 +97,12 @@ #define WILC_SPI_WAKEUP_REG 0x1 #define WILC_SPI_WAKEUP_BIT BIT(1) +#define WILC_SPI_HOST_TO_FW_REG 0x0b +#define WILC_SPI_HOST_TO_FW_BIT BIT(0) + +#define WILC_SPI_FW_TO_HOST_REG 0x10 +#define WILC_SPI_FW_TO_HOST_BIT BIT(0) + #define WILC_SPI_PROTOCOL_OFFSET (WILC_SPI_PROTOCOL_CONFIG - \ WILC_SPI_REG_BASE) @@ -392,7 +398,6 @@ struct wilc_cfg_rsp { u8 seq_no; }; -struct wilc; struct wilc_vif; int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 504b4d0b98c4..84b15a655eab 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -680,13 +680,10 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, eth_zero_addr(vif->bssid); ret = qtnf_cmd_send_connect(vif, sme); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid, vif->vifid); - goto out; - } -out: return ret; } @@ -702,13 +699,10 @@ qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev, pr_warn("unexpected bssid: %pM", auth->bssid); ret = qtnf_cmd_send_external_auth(vif, auth); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to report external auth\n", vif->mac->macid, vif->vifid); - goto out; - } -out: return ret; } @@ -727,8 +721,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, } if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { - ret = -EOPNOTSUPP; - goto out; + return -EOPNOTSUPP; } ret = qtnf_cmd_send_disconnect(vif, reason_code); @@ -742,7 +735,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, NULL, 0, true, GFP_KERNEL); } -out: return ret; } @@ -935,13 +927,10 @@ static int qtnf_update_owe_info(struct wiphy *wiphy, struct net_device *dev, return -EOPNOTSUPP; ret = qtnf_cmd_send_update_owe(vif, owe_info); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to update owe info\n", vif->mac->macid, vif->vifid); - goto out; - } -out: return ret; } @@ -987,18 +976,14 @@ static int qtnf_resume(struct wiphy *wiphy) vif = qtnf_mac_get_base_vif(mac); if (!vif) { pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - ret = -EFAULT; - goto exit; + return -EFAULT; } ret = qtnf_cmd_send_wowlan_set(vif, NULL); - if (ret) { + if (ret) pr_err("MAC%u: failed to reset WoWLAN triggers\n", 
mac->macid); - goto exit; - } -exit: return ret; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index f3ccbd2b1084..c68563c83098 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -379,10 +379,6 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -407,10 +403,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) cmd->do_register = reg; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -446,10 +439,7 @@ int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, qtnf_cmd_skb_put_buffer(cmd_skb, buf, len); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -477,10 +467,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1677,10 +1663,7 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) wiphy->retry_short); ret = qtnf_cmd_send(mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(mac->bus); return ret; @@ -1772,10 +1755,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, params->seq_len); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1807,10 +1787,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, cmd->pairwise = pairwise; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1837,10 +1814,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, cmd->multicast = multicast; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1864,10 +1838,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) cmd->key_index = key_index; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1931,8 +1902,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, } ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; out: qtnf_bus_unlock(vif->mac->bus); @@ -1966,10 +1935,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, cmd->reason_code = cpu_to_le16(params->reason_code); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2189,10 +2155,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2218,10 +2180,6 @@ int qtnf_cmd_send_external_auth(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2245,10 +2203,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) cmd->reason = cpu_to_le16(reason_code); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: 
qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2271,10 +2226,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2580,10 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(bus); return ret; @@ -2611,10 +2558,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(bus); return ret; @@ -2639,10 +2582,7 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(bus); return ret; @@ -2754,10 +2694,7 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, cmd->triggers = cpu_to_le32(triggers); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(bus); return ret; } @@ -2821,10 +2758,6 @@ int qtnf_cmd_send_update_owe(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index c775c177933b..8dc80574d08d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif, return 0; if (ev->ssid_len) { - memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len); - auth.ssid.ssid_len = ev->ssid_len; + int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN); + + memcpy(auth.ssid.ssid, ev->ssid, len); + auth.ssid.ssid_len = len; } auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 61a4f1ad31e2..e95c101c2711 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -989,11 +989,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry, void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) { - const char *mac_addr; - - mac_addr = of_get_mac_address(rt2x00dev->dev->of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(eeprom_mac_addr, mac_addr); + of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); if (!is_valid_ether_addr(eeprom_mac_addr)) { eth_random_addr(eeprom_mac_addr); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index d6d1be4169e5..d1a566cc0c9e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1391,7 +1391,6 @@ struct rtl8xxxu_priv { struct delayed_work ra_watchdog; struct work_struct c2hcmd_work; struct sk_buff_head c2hcmd_queue; - spinlock_t c2hcmd_lock; struct rtl8xxxu_btcoex bt_coex; struct rtl8xxxu_ra_report ra_report; }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 5cd7ef3625c5..9ff09cf7eb62 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1145,7 +1145,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw) switch 
(hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: ht = false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20: opmode |= BW_OPMODE_20MHZ; rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode); @@ -1272,7 +1272,7 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw) switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: ht = false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20: rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20; subchannel = 0; @@ -1741,11 +1741,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) case 3: priv->ep_tx_low_queue = 1; priv->ep_tx_count++; - /* fall through */ + fallthrough; case 2: priv->ep_tx_normal_queue = 1; priv->ep_tx_count++; - /* fall through */ + fallthrough; case 1: priv->ep_tx_high_queue = 1; priv->ep_tx_count++; @@ -5423,7 +5423,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) struct rtl8xxxu_priv *priv; struct rtl8723bu_c2h *c2h; struct sk_buff *skb = NULL; - unsigned long flags; u8 bt_info = 0; struct rtl8xxxu_btcoex *btcoex; struct rtl8xxxu_ra_report *rarpt; @@ -5439,9 +5438,7 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) goto out; while (!skb_queue_empty(&priv->c2hcmd_queue)) { - spin_lock_irqsave(&priv->c2hcmd_lock, flags); - skb = __skb_dequeue(&priv->c2hcmd_queue); - spin_unlock_irqrestore(&priv->c2hcmd_lock, flags); + skb = skb_dequeue(&priv->c2hcmd_queue); c2h = (struct rtl8723bu_c2h *)skb->data; @@ -5499,7 +5496,6 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data; struct device *dev = &priv->udev->dev; int len; - unsigned long flags; len = skb->len - 2; @@ -5538,9 +5534,7 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, break; } - spin_lock_irqsave(&priv->c2hcmd_lock, flags); - __skb_queue_tail(&priv->c2hcmd_queue, skb); - spin_unlock_irqrestore(&priv->c2hcmd_lock, flags); + skb_queue_tail(&priv->c2hcmd_queue, skb); schedule_work(&priv->c2hcmd_work); } @@ -6606,7 +6600,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface, spin_lock_init(&priv->rx_urb_lock); INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work); INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback); - spin_lock_init(&priv->c2hcmd_lock); INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback); skb_queue_head_init(&priv->c2hcmd_queue); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 6e8bd99e8911..2a7ee90a3f54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -551,7 +551,6 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.rf_lock); spin_lock_init(&rtlpriv->locks.waitq_lock); spin_lock_init(&rtlpriv->locks.entry_list_lock); - spin_lock_init(&rtlpriv->locks.c2hcmd_lock); spin_lock_init(&rtlpriv->locks.scan_list_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); spin_lock_init(&rtlpriv->locks.fw_ps_lock); @@ -2269,7 +2268,6 @@ static bool rtl_c2h_fast_cmd(struct ieee80211_hw *hw, struct sk_buff *skb) void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); - unsigned long flags; if (rtl_c2h_fast_cmd(hw, skb)) { rtl_c2h_content_parsing(hw, skb); @@ -2278,11 +2276,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) } /* enqueue */ - spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - - __skb_queue_tail(&rtlpriv->c2hcmd_queue, skb); - - 
spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); + skb_queue_tail(&rtlpriv->c2hcmd_queue, skb); /* wake up wq */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0); @@ -2340,16 +2334,11 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb; - unsigned long flags; int i; for (i = 0; i < 200; i++) { /* dequeue a task */ - spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - - skb = __skb_dequeue(&rtlpriv->c2hcmd_queue); - - spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); + skb = skb_dequeue(&rtlpriv->c2hcmd_queue); /* do it */ if (!skb) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 965bd9589045..8efe2f5e5b9f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -564,7 +564,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw) rtlhal->enter_pnp_sleep = false; rtlhal->wake_from_pnp_sleep = true; - /* to resovle s4 can not wake up*/ + /* to resolve s4 can not wake up*/ now = ktime_get_real_seconds(); if (now - rtlhal->last_suspend_sec < 5) return -1; @@ -806,7 +806,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, if (0 == changed_flags) return; - /*TODO: we disable broadcase now, so enable here */ + /*TODO: we disable broadcast now, so enable here */ if (changed_flags & FIF_ALLMULTI) { if (*new_flags & FIF_ALLMULTI) { mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | @@ -1018,6 +1018,25 @@ static void send_beacon_frame(struct ieee80211_hw *hw, } } +void rtl_update_beacon_work_callback(struct work_struct *work) +{ + struct rtl_works *rtlworks = + container_of(work, struct rtl_works, update_beacon_work); + struct ieee80211_hw *hw = rtlworks->hw; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_vif *vif = rtlpriv->mac80211.vif; + + if (!vif) { + WARN_ONCE(true, "no vif to update beacon\n"); + return; + } + + mutex_lock(&rtlpriv->locks.conf_mutex); + send_beacon_frame(hw, vif); + mutex_unlock(&rtlpriv->locks.conf_mutex); +} +EXPORT_SYMBOL_GPL(rtl_update_beacon_work_callback); + static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1747,6 +1766,18 @@ static void rtl_op_flush(struct ieee80211_hw *hw, rtlpriv->intf_ops->flush(hw, queues, drop); } +static int rtl_op_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) + schedule_work(&rtlpriv->works.update_beacon_work); + + return 0; +} + /* Description: * This routine deals with the Power Configuration CMD * parsing for RTL8723/RTL8188E Series IC. 
@@ -1796,7 +1827,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, value |= (GET_PWR_CFG_VALUE(cfg_cmd) & GET_PWR_CFG_MASK(cfg_cmd)); - /*Write the value back to sytem register*/ + /*Write the value back to system register*/ rtl_write_byte(rtlpriv, offset, value); break; case PWR_CMD_POLLING: @@ -1903,6 +1934,7 @@ const struct ieee80211_ops rtl_ops = { .sta_add = rtl_op_sta_add, .sta_remove = rtl_op_sta_remove, .flush = rtl_op_flush, + .set_tim = rtl_op_set_tim, }; EXPORT_SYMBOL_GPL(rtl_ops); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h index 7447ff456710..345161b47442 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.h +++ b/drivers/net/wireless/realtek/rtlwifi/core.h @@ -60,5 +60,6 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); bool rtl_btc_status_false(void); void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval); +void rtl_update_beacon_work_callback(struct work_struct *work); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 861cc663ca93..bf686a916acb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -2466,8 +2466,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw) /* 0:Low, 1:High, 2:From Efuse. */ rtlpriv->btcoexist.reg_bt_iso = 2; - /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ - rtlpriv->btcoexist.reg_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ rtlpriv->btcoexist.reg_bt_sco = 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 1dbdddce0823..a74724c971b9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -372,18 +372,14 @@ static struct pci_driver rtl92de_driver = { /* add global spin lock to solve the problem that * Dul mac register operation on the same time */ -spinlock_t globalmutex_power; -spinlock_t globalmutex_for_fwdownload; -spinlock_t globalmutex_for_power_and_efuse; +DEFINE_SPINLOCK(globalmutex_power); +DEFINE_SPINLOCK(globalmutex_for_fwdownload); +DEFINE_SPINLOCK(globalmutex_for_power_and_efuse); static int __init rtl92de_module_init(void) { int ret = 0; - spin_lock_init(&globalmutex_power); - spin_lock_init(&globalmutex_for_fwdownload); - spin_lock_init(&globalmutex_for_power_and_efuse); - ret = pci_register_driver(&rtl92de_driver); if (ret) WARN_ONCE(true, "rtl8192de: No device found\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 27c8a5d96520..fcaaf664cbec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x824, 0x00030FE0, 0x828, 0x00000000, 0x82C, 0x002081DD, - 0x830, 0x2AAA8E24, + 0x830, 0x2AAAEEC8, 0x834, 0x0037A706, 0x838, 0x06489B44, 0x83C, 0x0000095B, @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x9D8, 0x00000000, 0x9DC, 0x00000000, 0x9E0, 0x00005D00, - 0x9E4, 0x00000002, + 0x9E4, 0x00000003, 0x9E8, 0x00000001, 0xA00, 0x00D047C8, - 0xA04, 0x01FF000C, + 0xA04, 0x01FF800C, 0xA08, 0x8C8A8300, 0xA0C, 0x2E68000F, 0xA10, 0x9500BB78, @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x083, 0x00021800, 
0x084, 0x00028000, 0x085, 0x00048000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x0009483A, + 0xA0000000, 0x00000000, 0x086, 0x00094838, + 0xB0000000, 0x00000000, 0x087, 0x00044980, 0x088, 0x00048000, 0x089, 0x0000D480, @@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x03C, 0x000CA000, 0x0EF, 0x00000000, 0x0EF, 0x00001100, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF3, 0x034, 0x00049DF0, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF3, 0x034, 0x00049DF0, - 0xFF0F0404, 0xCDEF, - 0x034, 0x0004ADF3, - 0x034, 0x00049DF0, - 0xFF0F0200, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF5, 0x034, 0x00049DF2, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004A0F3, 0x034, 0x000490B1, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xA0000000, 0x00000000, 0x034, 0x0004ADF7, 0x034, 0x00049DF3, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00048DED, - 0x034, 0x00047DEA, - 0x034, 0x00046DE7, - 0x034, 0x00045CE9, - 0x034, 0x00044CE6, - 0x034, 0x000438C6, - 0x034, 0x00042886, - 0x034, 0x00041486, - 0x034, 0x00040447, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00048DED, 0x034, 0x00047DEA, 0x034, 0x00046DE7, @@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042886, 0x034, 0x00041486, 0x034, 0x00040447, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00048DED, 0x034, 0x00047DEA, 0x034, 0x00046DE7, @@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042886, 0x034, 0x00041486, 0x034, 0x00040447, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000480AE, 0x034, 0x000470AB, 0x034, 0x0004608B, @@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042026, 0x034, 0x00041023, 0x034, 0x00040002, - 0xCDCDCDCD, 0xCDCD, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xA0000000, 0x00000000, 0x034, 0x00048DEF, 0x034, 0x00047DEC, 0x034, 0x00046DE9, @@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x0004248A, 0x034, 0x0004108D, 0x034, 0x0004008A, - 0xFF0F0104, 0xDEAD, - 0xFF0F0200, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002ADF4, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0F3, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0F3, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002ADF4, + 0xA0000000, 0x00000000, 0x034, 0x0002ADF7, - 0xFF0F0200, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00029DF4, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF4, - 0xFF0F0404, 0xCDEF, + 0x90000110, 
0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF4, - 0xFF0F0200, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF1, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000290F0, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000290F0, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF1, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF4, + 0xA0000000, 0x00000000, 0x034, 0x00029DF2, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00028DF1, - 0x034, 0x00027DEE, - 0x034, 0x00026DEB, - 0x034, 0x00025CEC, - 0x034, 0x00024CE9, - 0x034, 0x000238CA, - 0x034, 0x00022889, - 0x034, 0x00021489, - 0x034, 0x0002044A, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00028DF1, 0x034, 0x00027DEE, 0x034, 0x00026DEB, @@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022889, 0x034, 0x00021489, 0x034, 0x0002044A, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00028DF1, 0x034, 0x00027DEE, 0x034, 0x00026DEB, @@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022889, 0x034, 0x00021489, 0x034, 0x0002044A, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000280AF, 0x034, 0x000270AC, 0x034, 0x0002608B, @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022026, 0x034, 0x00021023, 0x034, 0x00020002, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xA0000000, 0x00000000, 0x034, 0x00028DEE, 0x034, 0x00027DEB, 0x034, 0x00026CCD, @@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022849, 0x034, 0x00021449, 0x034, 0x0002004D, - 0xFF0F0104, 0xDEAD, - 0xFF0F02C0, 0xABCD, + 0xB0000000, 0x00000000, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0D7, 0x034, 0x000090D3, 0x034, 0x000080B1, 0x034, 0x000070AE, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x034, 0x0000ADF7, 0x034, 0x00009DF4, 0x034, 0x00008DF1, 0x034, 0x00007DEE, - 0xFF0F02C0, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00006DEB, - 0x034, 0x00005CEC, - 0x034, 0x00004CE9, - 0x034, 0x000038CA, - 0x034, 0x00002889, - 0x034, 0x00001489, - 0x034, 0x0000044A, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00006DEB, 0x034, 0x00005CEC, 0x034, 0x00004CE9, @@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002889, 0x034, 0x00001489, 0x034, 0x0000044A, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00006DEB, 0x034, 0x00005CEC, 0x034, 0x00004CE9, @@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002889, 0x034, 0x00001489, 0x034, 0x0000044A, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000608D, 0x034, 0x0000506B, 0x034, 0x0000404A, @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002044, 
0x034, 0x00001025, 0x034, 0x00000004, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000608D, + 0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xA0000000, 0x00000000, 0x034, 0x00006DCD, 0x034, 0x00005CCD, 0x034, 0x00004CCA, @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002888, 0x034, 0x00001488, 0x034, 0x00000486, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x035, 0x00000145, 0x035, 0x00008145, 0x035, 0x00010145, @@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x000401C7, 0x035, 0x000481C7, 0x035, 0x000501C7, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 
0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x036, 0x000056B3, 0x036, 0x0000D6B3, 0x036, 0x000156B3, @@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x0004E7B4, 0x036, 0x000567B4, 0x036, 0x0005E7B4, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000008, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x03C, 0x0000022A, 0x03C, 0x00000594, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000820, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000820, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0xA0000000, 0x00000000, 0x03C, 0x00000900, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x008, 0x00002000, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0DF, 0x000000C0, - 0x01F, 0x00040064, - 0xFF0F0104, 0xABCD, + 0x01F, 0x00000064, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xFF0F0404, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 
0x059, 0x0006016C, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x058, 0x00081184, 0x059, 0x0006016C, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x061, 0x000EAD53, 0x062, 0x00093BC4, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0200, 0xCDEF, - 0x063, 0x000710E9, - 0xFF0F02C0, 0xCDEF, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0xA0000000, 0x00000000, 0x063, 0x000714E9, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xFF0F0204, 0xCDEF, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C67C, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xFF0F0404, 0xCDEF, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x064, 0x0001C67C, - 0xFF0F0104, 0xDEAD, - 0xFF0F0200, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x065, 0x00093016, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x065, 0x00093015, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093015, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093016, + 0xA0000000, 0x00000000, 0x065, 0x00091016, - 0xFF0F0200, 0xDEAD, + 0xB0000000, 0x00000000, 0x018, 0x00000006, 0x0EF, 0x00002000, 0x03B, 0x0003824B, @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x0B4, 0x0001214C, 0x0B7, 0x0003000C, 0x01C, 0x000539D2, + 0x0C4, 0x000AFE00, 0x018, 0x0001F12A, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, 0x018, 0x0001712A, }; @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY); u32 RTL8821AE_MAC_REG_ARRAY[] = { + 0x421, 0x0000000F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -2485,7 
+2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0xA6360001, 0x81C, 0xA5380001, 0x81C, 0xA43A0001, - 0x81C, 0xA33C0001, + 0x81C, 0x683C0001, 0x81C, 0x673E0001, 0x81C, 0x66400001, 0x81C, 0x65420001, @@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x017A0001, 0x81C, 0x017C0001, 0x81C, 0x017E0001, - 0xFF0F02C0, 0xABCD, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFB000101, 0x81C, 0xFA020101, 0x81C, 0xF9040101, @@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x016E0101, 0x81C, 0x01700101, 0x81C, 0x01720101, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x81C, 0xFF000101, 0x81C, 0xFF020101, 0x81C, 0xFE040101, @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x046E0101, 0x81C, 0x03700101, 0x81C, 0x02720101, - 0xFF0F02C0, 0xDEAD, + 0xB0000000, 0x00000000, 0x81C, 0x01740101, 0x81C, 0x01760101, 0x81C, 0x01780101, diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 6c5e242b1bc5..86a236873254 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -805,6 +805,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); + cancel_work_sync(&rtlpriv->works.update_beacon_work); flush_workqueue(rtlpriv->works.rtl_wq); @@ -1031,6 +1032,8 @@ int rtl_usb_probe(struct usb_interface *intf, rtl_fill_h2c_cmd_work_callback); INIT_WORK(&rtlpriv->works.lps_change_work, rtl_lps_change_work_callback); + INIT_WORK(&rtlpriv->works.update_beacon_work, + rtl_update_beacon_work_callback); rtlpriv->usb_data_index = 0; init_completion(&rtlpriv->firmware_loading_complete); @@ -1070,7 +1073,6 @@ int rtl_usb_probe(struct usb_interface *intf, err = ieee80211_register_hw(hw); if (err) { pr_err("Can't register mac80211 hw.\n"); - err = -ENODEV; goto error_out; } rtlpriv->mac80211.mac80211_registered = 1; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index fdccfd29fd61..aa07856411b1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2450,7 +2450,6 @@ struct rtl_locks { spinlock_t waitq_lock; spinlock_t entry_list_lock; 
spinlock_t usb_lock; - spinlock_t c2hcmd_lock; spinlock_t scan_list_lock; /* lock for the scan list */ /*FW clock change */ @@ -2487,6 +2486,7 @@ struct rtl_works { struct work_struct lps_change_work; struct work_struct fill_h2c_cmd; + struct work_struct update_beacon_work; }; struct rtl_debug { @@ -3086,14 +3086,9 @@ static inline __le16 rtl_get_fc(struct sk_buff *skb) return rtl_get_hdr(skb)->frame_control; } -static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr) -{ - return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK; -} - static inline u16 rtl_get_tid(struct sk_buff *skb) { - return rtl_get_tid_h(rtl_get_hdr(skb)); + return ieee80211_get_tid(rtl_get_hdr(skb)); } static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index ea2be1e25065..cedbf3825848 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -787,7 +787,6 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm; - struct rtw_efuse *efuse = &rtwdev->efuse; u8 link = 0; u8 center_chan = 0; u8 bw; @@ -798,7 +797,7 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) if (type != COEX_MEDIA_DISCONNECT) center_chan = rtwdev->hal.current_channel; - if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) { + if (center_chan == 0) { link = 0; center_chan = 0; bw = 0; @@ -2325,8 +2324,11 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->bt_a2dp_exist) { slot_type = TDMA_4SLOT; - table_case = 9; tdma_case = 11; + if (coex_stat->wl_gl_busy) + table_case = 26; + else + table_case = 9; } else { table_case = 9; tdma_case = 7; @@ -2646,6 +2648,11 @@ void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) rtw_coex_set_gnt_debug(rtwdev); } +void rtw_coex_power_off_setting(struct rtw_dev *rtwdev) +{ + rtw_write16(rtwdev, REG_WIFI_BT_INFO, BIT_BT_INT_EN); +} + void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) { __rtw_coex_init_hw_config(rtwdev, wifi_only); diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h index 8ab9852ec9ed..fc61a0cab3e4 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.h +++ b/drivers/net/wireless/realtek/rtw88/coex.h @@ -393,6 +393,7 @@ void rtw_coex_bt_multi_link_remain_work(struct work_struct *work); void rtw_coex_wl_ccklock_work(struct work_struct *work); void rtw_coex_power_on_setting(struct rtw_dev *rtwdev); +void rtw_coex_power_off_setting(struct rtw_dev *rtwdev); void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only); void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type); void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type); @@ -405,4 +406,12 @@ void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type); void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type); void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m); +static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + return coex_stat->bt_disabled; +} + #endif diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 948cb79050ea..18ab472ea46c 100644 --- 
a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -10,6 +10,7 @@ #include "fw.h" #include "debug.h" #include "phy.h" +#include "reg.h" #ifdef CONFIG_RTW88_DEBUGFS @@ -34,9 +35,17 @@ struct rtw_debugfs_priv { u32 addr; u32 len; } read_reg; + struct { + u8 bit; + } dm_cap; }; }; +static const char * const rtw_dm_cap_strs[] = { + [RTW_DM_CAP_NA] = "NA", + [RTW_DM_CAP_TXGAPK] = "TXGAPK", +}; + static int rtw_debugfs_single_show(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; @@ -270,7 +279,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp, if (num != 2) { rtw_warn(rtwdev, "invalid arguments\n"); - return num; + return -EINVAL; } debugfs_priv->rsvd_page.page_offset = offset; @@ -818,6 +827,117 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v) return 0; } +static ssize_t rtw_debugfs_set_fw_crash(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + bool input; + int ret; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); + + ret = kstrtobool(tmp, &input); + if (ret) + return -EINVAL; + + if (!input) + return -EINVAL; + + rtw_write8(rtwdev, REG_HRCV_MSG, 1); + + return count; +} + +static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)); + return 0; +} + +static ssize_t rtw_debugfs_set_dm_cap(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + int bit; + bool en; + + if (kstrtoint_from_user(buffer, count, 10, &bit)) + return -EINVAL; + + en = bit > 0; + bit = abs(bit); + + if (bit >= RTW_DM_CAP_NUM) { + rtw_warn(rtwdev, "unknown DM CAP %d\n", bit); + return -EINVAL; + } + + if (en) + dm_info->dm_flags &= ~BIT(bit); + else + dm_info->dm_flags |= BIT(bit); + + debugfs_priv->dm_cap.bit = bit; + + return count; +} + +static void dump_gapk_status(struct rtw_dev *rtwdev, struct seq_file *m) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + int i, path; + u32 val; + + seq_printf(m, "\n(%2d) %c%s\n\n", RTW_DM_CAP_TXGAPK, + dm_info->dm_flags & BIT(RTW_DM_CAP_TXGAPK) ? 
'-' : '+', + rtw_dm_cap_strs[RTW_DM_CAP_TXGAPK]); + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + val = rtw_read_rf(rtwdev, path, RF_GAINTX, RFREG_MASK); + seq_printf(m, "path %d:\n0x%x = 0x%x\n", path, RF_GAINTX, val); + + for (i = 0; i < RF_HW_OFFSET_NUM; i++) + seq_printf(m, "[TXGAPK] offset %d %d\n", + txgapk->rf3f_fs[path][i], i); + seq_puts(m, "\n"); + } +} + +static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + int i; + + switch (debugfs_priv->dm_cap.bit) { + case RTW_DM_CAP_TXGAPK: + dump_gapk_status(rtwdev, m); + break; + default: + for (i = 1; i < RTW_DM_CAP_NUM; i++) { + seq_printf(m, "(%2d) %c%s\n", i, + dm_info->dm_flags & BIT(i) ? '-' : '+', + rtw_dm_cap_strs[i]); + } + break; + } + debugfs_priv->dm_cap.bit = RTW_DM_CAP_NA; + return 0; +} + #define rtw_debug_impl_mac(page, addr) \ static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ .cb_read = rtw_debug_get_mac_page, \ @@ -921,6 +1041,16 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = { .cb_read = rtw_debugfs_get_coex_info, }; +static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { + .cb_write = rtw_debugfs_set_fw_crash, + .cb_read = rtw_debugfs_get_fw_crash, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { + .cb_write = rtw_debugfs_set_dm_cap, + .cb_read = rtw_debugfs_get_dm_cap, +}; + #define rtw_debugfs_add_core(name, mode, fopname, parent) \ do { \ rtw_debug_priv_ ##name.rtwdev = rtwdev; \ @@ -994,6 +1124,8 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) } rtw_debugfs_add_r(rf_dump); rtw_debugfs_add_r(tx_pwr_tbl); + rtw_debugfs_add_rw(fw_crash); + rtw_debugfs_add_rw(dm_cap); } #endif /* CONFIG_RTW88_DEBUGFS */ diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index e16e0da26e77..c8efd1900a34 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -19,6 +19,7 @@ enum rtw_debug_mask { RTW_DBG_PS = 0x00000400, RTW_DBG_BF = 0x00000800, RTW_DBG_WOW = 0x00001000, + RTW_DBG_CFO = 0x00002000, RTW_DBG_ALL = 0xffffffff }; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 6649b84f6b1e..ea2cd4db1d3c 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -350,6 +350,18 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para) } EXPORT_SYMBOL(rtw_fw_do_iqk); +void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION); + + RFK_SET_INFORM_START(h2c_pkt, start); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} +EXPORT_SYMBOL(rtw_fw_inform_rfk_status); + void rtw_fw_query_bt_info(struct rtw_dev *rtwdev) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; @@ -500,6 +512,21 @@ void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect) rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } +void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev) +{ + struct rtw_traffic_stats *stats = &rtwdev->stats; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO); + SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput); + SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput); + SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate); + 
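A note on the dm_cap debugfs semantics added above: the handler parses a signed index, where a positive value clears the matching bit in dm_flags (a set bit means the capability is disabled) and a negative value sets it. A small user-space sketch of that sign convention, assuming bit 1 stands for TXGAPK as in the rtw_dm_cap enum; the names below are otherwise hypothetical:

/* Illustrative sketch of the dm_cap sign convention: positive input enables
 * a capability (clears its "disabled" bit), negative input disables it.
 * User-space model only, not the debugfs handler itself.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define DM_CAP_TXGAPK 1	/* assumption: mirrors RTW_DM_CAP_TXGAPK */

static uint32_t dm_flags;	/* a set bit means the capability is disabled */

static int set_dm_cap(int input)
{
	int bit = abs(input);

	if (bit >= 32)
		return -1;	/* unknown capability index */

	if (input > 0)
		dm_flags &= ~(1u << bit);	/* enable */
	else
		dm_flags |= 1u << bit;		/* disable */
	return 0;
}

int main(void)
{
	set_dm_cap(-DM_CAP_TXGAPK);
	printf("flags after disable: 0x%x\n", dm_flags);	/* 0x2 */
	set_dm_cap(DM_CAP_TXGAPK);
	printf("flags after enable:  0x%x\n", dm_flags);	/* 0x0 */
	return 0;
}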
SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate); + SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]); + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev) { struct rtw_lps_conf *conf = &rtwdev->lps_conf; diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 39c905c1b1d8..7c5b1d75e26f 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -345,6 +345,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define H2C_CMD_LPS_PG_INFO 0x2b #define H2C_CMD_RA_INFO 0x40 #define H2C_CMD_RSSI_MONITOR 0x42 +#define H2C_CMD_WL_PHY_INFO 0x58 #define H2C_CMD_COEX_TDMA_TYPE 0x60 #define H2C_CMD_QUERY_BT_INFO 0x61 @@ -353,6 +354,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define H2C_CMD_WL_CH_INFO 0x66 #define H2C_CMD_QUERY_BT_MP_INFO 0x67 #define H2C_CMD_BT_WIFI_CONTROL 0x69 +#define H2C_CMD_WIFI_CALIBRATION 0x6d #define H2C_CMD_KEEP_ALIVE 0x03 #define H2C_CMD_DISCONNECT_DECISION 0x04 @@ -369,6 +371,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_WL_PHY_INFO_TX_TP(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(17, 8)) +#define SET_WL_PHY_INFO_RX_TP(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(27, 18)) +#define SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) + #define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8)) #define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \ @@ -530,6 +543,9 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16)) #define GET_FW_DUMP_TLV_VAL(_header) \ le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0)) + +#define RFK_SET_INFORM_START(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb) { u32 pkt_offset; @@ -545,6 +561,7 @@ void rtw_fw_send_general_info(struct rtw_dev *rtwdev); void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev); void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para); +void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start); void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev); void rtw_fw_set_pg_info(struct rtw_dev *rtwdev); void rtw_fw_query_bt_info(struct rtw_dev *rtwdev); @@ -559,6 +576,7 @@ void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data); void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn); +void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev); int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, u8 *buf, u32 size); void rtw_remove_rsvd_page(struct rtw_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw88/hci.h 
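The SET_WL_PHY_INFO_* and RFK_SET_INFORM_START accessors above follow the driver's usual H2C pattern: each field is a GENMASK slice of a little-endian 32-bit word in the command buffer, written via le32p_replace_bits(). The standalone sketch below re-creates that packing by hand so the byte layout is visible; the helper is a stand-in for the kernel accessor, and only the TX/RX throughput field positions are taken from the macros above:

/* Illustrative sketch of GENMASK-style field packing into a little-endian
 * H2C payload word, modelled on le32p_replace_bits(). Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* Replace the field described by mask (a contiguous run of bits) in the
 * little-endian 32-bit word stored at buf. */
static void le32_replace_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
	uint32_t word = buf[0] | buf[1] << 8 | buf[2] << 16 | (uint32_t)buf[3] << 24;
	int shift = __builtin_ctz(mask);

	word = (word & ~mask) | ((val << shift) & mask);
	buf[0] = word;
	buf[1] = word >> 8;
	buf[2] = word >> 16;
	buf[3] = word >> 24;
}

int main(void)
{
	uint8_t h2c_pkt[8] = {0};

	/* Field positions copied from SET_WL_PHY_INFO_TX_TP / _RX_TP above:
	 * TX throughput in bits 17:8, RX throughput in bits 27:18 of word 0. */
	le32_replace_bits(h2c_pkt, 100, 0x0003ff00);	/* GENMASK(17, 8) */
	le32_replace_bits(h2c_pkt, 250, 0x0ffc0000);	/* GENMASK(27, 18) */

	for (int i = 0; i < 8; i++)
		printf("%02x ", h2c_pkt[i]);
	printf("\n");
	return 0;
}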
b/drivers/net/wireless/realtek/rtw88/hci.h index 2cba327e6218..4c6fc6fb3f83 100644 --- a/drivers/net/wireless/realtek/rtw88/hci.h +++ b/drivers/net/wireless/realtek/rtw88/hci.h @@ -11,6 +11,7 @@ struct rtw_hci_ops { struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb); void (*tx_kick_off)(struct rtw_dev *rtwdev); + void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop); int (*setup)(struct rtw_dev *rtwdev); int (*start)(struct rtw_dev *rtwdev); void (*stop)(struct rtw_dev *rtwdev); @@ -258,4 +259,19 @@ static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev) return rtwdev->hci.type; } +static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues, + bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + rtwdev->hci.ops->flush_queues(rtwdev, queues, drop); +} + +static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + rtwdev->hci.ops->flush_queues(rtwdev, + BIT(rtwdev->hw->queues) - 1, + drop); +} + #endif diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 59028b121b00..d1678aed9d9c 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -530,6 +530,25 @@ static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst, return 0; } +int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size) +{ + u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE; + + if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n"); + return -EBUSY; + } + + ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN; + + if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n"); + return -EBUSY; + } + + return 0; +} + static bool check_fw_checksum(struct rtw_dev *rtwdev, u32 addr) { diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h index ce64cdf7a565..3172aa5ac4de 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.h +++ b/drivers/net/wireless/realtek/rtw88/mac.h @@ -15,7 +15,10 @@ #define ILLEGAL_KEY_GROUP 0xFAAAAA00 /* HW memory address */ +#define OCPBASE_RXBUF_FW_88XX 0x18680000 #define OCPBASE_TXBUF_88XX 0x18780000 +#define OCPBASE_ROM_88XX 0x00000000 +#define OCPBASE_IMEM_88XX 0x00030000 #define OCPBASE_DMEM_88XX 0x00200000 #define OCPBASE_EMEM_88XX 0x00100000 @@ -33,6 +36,7 @@ void rtw_mac_power_off(struct rtw_dev *rtwdev); int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw); int rtw_mac_init(struct rtw_dev *rtwdev); void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop); +int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size); static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop) { diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 2351dfb0d2e2..333df6b38113 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -520,6 +520,7 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, hw_key_type, hw_key_idx); break; case DISABLE_KEY: + rtw_hci_flush_all_queues(rtwdev, false); rtw_mac_flush_all_queues(rtwdev, false); rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); break; @@ -670,6 +671,7 @@ static void rtw_ops_flush(struct ieee80211_hw *hw, mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); + 
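The new flush_queues HCI op above is deliberately optional: the wrappers only call it when the bus driver fills it in, and the "all queues" case is expressed as BIT(hw->queues) - 1. A user-space sketch of that optional-callback pattern (structure and names are hypothetical, not the rtw88 types):

/* Illustrative sketch of the optional-callback pattern used by the new
 * flush_queues HCI op: the core builds an "all queues" bitmap and only
 * calls the op when the bus driver provides one.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_hci_ops {
	void (*flush_queues)(uint32_t queues, int drop);	/* may be NULL */
};

struct demo_dev {
	const struct demo_hci_ops *ops;
	unsigned int hw_queues;		/* number of mac80211 hw queues */
};

static void demo_flush_all_queues(struct demo_dev *dev, int drop)
{
	uint32_t all = (1u << dev->hw_queues) - 1;	/* same idea as BIT(hw->queues) - 1 */

	if (dev->ops->flush_queues)
		dev->ops->flush_queues(all, drop);
}

static void pci_flush_queues(uint32_t queues, int drop)
{
	printf("flushing queue mask 0x%x (drop=%d)\n", queues, drop);
}

int main(void)
{
	const struct demo_hci_ops pci_ops = { .flush_queues = pci_flush_queues };
	struct demo_dev dev = { .ops = &pci_ops, .hw_queues = 4 };

	demo_flush_all_queues(&dev, 0);	/* prints mask 0xf */
	return 0;
}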
rtw_hci_flush_queues(rtwdev, queues, drop); rtw_mac_flush_queues(rtwdev, queues, drop); mutex_unlock(&rtwdev->mutex); } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index e6989c0525cc..f3a3a86fa9b5 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -345,15 +345,9 @@ static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) "fw crash dump's seq is wrong: %d\n", seq); goto free_buf; } - if (seq == 0 && - (GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE || - GET_FW_DUMP_TLV_LEN(buf) != FW_CD_LEN || - GET_FW_DUMP_TLV_VAL(buf) != FW_CD_VAL)) { - rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's tlv is wrong\n"); - goto free_buf; - } - print_hex_dump_bytes("rtw88 fw dump: ", DUMP_PREFIX_OFFSET, buf, size); + print_hex_dump(KERN_ERR, "rtw88 fw dump: ", DUMP_PREFIX_OFFSET, 16, 1, + buf, size, true); if (GET_FW_DUMP_MORE(buf) == 1) { rtwdev->fw.prev_dump_seq = seq; @@ -368,6 +362,78 @@ exit: return ret; } +int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, + const char *prefix_str) +{ + u32 rxff = rtwdev->chip->fw_rxff_size; + u32 dump_size, done_size = 0; + u8 *buf; + int ret; + + buf = vzalloc(size); + if (!buf) + return -ENOMEM; + + while (size) { + dump_size = size > rxff ? rxff : size; + + ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size, + dump_size); + if (ret) { + rtw_err(rtwdev, + "ddma fw 0x%x [+0x%x] to fw fifo fail\n", + ocp_src, done_size); + goto exit; + } + + ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, + dump_size, (u32 *)(buf + done_size)); + if (ret) { + rtw_err(rtwdev, + "dump fw 0x%x [+0x%x] from fw fifo fail\n", + ocp_src, done_size); + goto exit; + } + + size -= dump_size; + done_size += dump_size; + } + + print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 1, + buf, done_size, true); + +exit: + vfree(buf); + return ret; +} +EXPORT_SYMBOL(rtw_dump_fw); + +int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size, + const char *prefix_str) +{ + u8 *buf; + u32 i; + + if (addr & 0x3) { + WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr); + return -EINVAL; + } + + buf = vzalloc(size); + if (!buf) + return -ENOMEM; + + for (i = 0; i < size; i += 4) + *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i); + + print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, + size, true); + + vfree(buf); + return 0; +} +EXPORT_SYMBOL(rtw_dump_reg); + void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, struct ieee80211_bss_conf *conf) { @@ -419,10 +485,8 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev) ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work); } -static void rtw_fw_recovery_work(struct work_struct *work) +static void __fw_recovery_work(struct rtw_dev *rtwdev) { - struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, - fw_recovery_work); /* rtw_fw_dump_crash_log() returns false indicates that there are * still more log to dump. 
Driver set 0x1cf[7:0] = 0x1 to tell firmware @@ -435,18 +499,26 @@ static void rtw_fw_recovery_work(struct work_struct *work) } rtwdev->fw.prev_dump_seq = 0; - WARN(1, "firmware crash, start reset and recover\n"); + set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); + rtw_chip_dump_fw_crash(rtwdev); - mutex_lock(&rtwdev->mutex); + WARN(1, "firmware crash, start reset and recover\n"); - set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); rcu_read_lock(); rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev); rcu_read_unlock(); rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev); rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev); rtw_enter_ips(rtwdev); +} +static void rtw_fw_recovery_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + fw_recovery_work); + + mutex_lock(&rtwdev->mutex); + __fw_recovery_work(rtwdev); mutex_unlock(&rtwdev->mutex); ieee80211_restart_hw(rtwdev->hw); @@ -1138,6 +1210,7 @@ int rtw_core_start(struct rtw_dev *rtwdev) static void rtw_power_off(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); + rtw_coex_power_off_setting(rtwdev); rtw_mac_power_off(rtwdev); } @@ -1393,7 +1466,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; - int ret = 0; switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: @@ -1431,7 +1503,7 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) hal->bfee_sts_cap = 3; - return ret; + return 0; } static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 35afea91fd29..dc3744847ba9 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -625,6 +625,7 @@ struct rtw_rx_pkt_stat { struct rtw_sta_info *si; struct ieee80211_vif *vif; + struct ieee80211_hdr *hdr; }; DECLARE_EWMA(tp, 10, 2); @@ -805,6 +806,7 @@ struct rtw_regulatory { struct rtw_chip_ops { int (*mac_init)(struct rtw_dev *rtwdev); + void (*dump_fw_crash)(struct rtw_dev *rtwdev); void (*shutdown)(struct rtw_dev *rtwdev); int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map); void (*phy_set_param)(struct rtw_dev *rtwdev); @@ -837,6 +839,8 @@ struct rtw_chip_ops { struct ieee80211_bss_conf *conf); void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, u8 fixrate_en, u8 *new_rate); + void (*cfo_init)(struct rtw_dev *rtwdev); + void (*cfo_track)(struct rtw_dev *rtwdev); /* for coex */ void (*coex_set_init)(struct rtw_dev *rtwdev); @@ -1166,6 +1170,7 @@ struct rtw_chip_info { bool en_dis_dpd; u16 dpd_ratemask; u8 iqk_threshold; + u8 lck_threshold; const struct rtw_pwr_track_tbl *pwr_track_tbl; u8 bfer_su_max_num; @@ -1497,9 +1502,46 @@ struct rtw_iqk_info { } result; }; +enum rtw_rf_band { + RF_BAND_2G_CCK, + RF_BAND_2G_OFDM, + RF_BAND_5G_L, + RF_BAND_5G_M, + RF_BAND_5G_H, + RF_BAND_MAX +}; + +#define RF_GAIN_NUM 11 +#define RF_HW_OFFSET_NUM 10 + +struct rtw_gapk_info { + u32 rf3f_bp[RF_BAND_MAX][RF_GAIN_NUM][RTW_RF_PATH_MAX]; + u32 rf3f_fs[RTW_RF_PATH_MAX][RF_GAIN_NUM]; + bool txgapk_bp_done; + s8 offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; + s8 fianl_offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; + u8 read_txgain; + u8 channel; +}; + +struct rtw_cfo_track { + bool is_adjust; + u8 crystal_cap; + s32 cfo_tail[RTW_RF_PATH_MAX]; + s32 cfo_cnt[RTW_RF_PATH_MAX]; + u32 packet_count; + u32 packet_count_pre; +}; + #define RRSR_INIT_2G 0x15f #define RRSR_INIT_5G 
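For the rtw_dump_fw() helper added above, the firmware region is moved through the RX FIFO in chunks, advancing the source offset by the amount completed on each pass. A standalone sketch of just that chunking arithmetic, with a printout standing in for the DDMA and FIFO reads (sizes are made up):

/* Illustrative sketch of the chunked copy loop used by rtw_dump_fw(): a
 * region of `size` bytes is moved through a FIFO of `rxff` bytes. Not the
 * driver's DDMA/FIFO I/O.
 */
#include <stdint.h>
#include <stdio.h>

static int dump_region(uint32_t size, uint32_t rxff)
{
	uint32_t dump_size, done_size = 0;

	while (size) {
		dump_size = size > rxff ? rxff : size;	/* clamp to FIFO size */

		/* real code: DDMA the chunk into the FW FIFO, then read it out */
		printf("copy 0x%x bytes at offset 0x%x\n", dump_size, done_size);

		size -= dump_size;
		done_size += dump_size;
	}
	return 0;
}

int main(void)
{
	/* e.g. a 0x10000-byte region through a 0x6000-byte FIFO (example sizes) */
	return dump_region(0x10000, 0x6000);
}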
0x150 +enum rtw_dm_cap { + RTW_DM_CAP_NA, + RTW_DM_CAP_TXGAPK, + RTW_DM_CAP_NUM +}; + struct rtw_dm_info { u32 cck_fa_cnt; u32 ofdm_fa_cnt; @@ -1534,6 +1576,7 @@ struct rtw_dm_info { u32 rrsr_mask_min; u8 thermal_avg[RTW_RF_PATH_MAX]; u8 thermal_meter_k; + u8 thermal_meter_lck; s8 delta_power_index[RTW_RF_PATH_MAX]; s8 delta_power_index_last[RTW_RF_PATH_MAX]; u8 default_ofdm_index; @@ -1549,6 +1592,7 @@ struct rtw_dm_info { u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM]; struct rtw_dpk_info dpk_info; + struct rtw_cfo_track cfo_track; /* [bandwidth 0:20M/1:40M][number of path] */ u8 cck_pd_lv[2][RTW_RF_PATH_MAX]; @@ -1566,7 +1610,10 @@ struct rtw_dm_info { struct ewma_evm ewma_evm[RTW_EVM_NUM]; struct ewma_snr ewma_snr[RTW_SNR_NUM]; + u32 dm_flags; /* enum rtw_dm_cap */ struct rtw_iqk_info iqk; + struct rtw_gapk_info gapk; + bool is_bt_iqk_timeout; }; struct rtw_efuse { @@ -1876,6 +1923,12 @@ static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) clear_bit(mac_id, rtwdev->mac_id_map); } +static inline void rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->dump_fw_crash) + rtwdev->chip->ops->dump_fw_crash(rtwdev); +} + void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); @@ -1905,5 +1958,9 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist); void rtw_fw_recovery(struct rtw_dev *rtwdev); +int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, + const char *prefix_str); +int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size, + const char *prefix_str); #endif diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 786a48649946..f59a4c462e3b 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + rtw_pci_napi_start(rtwdev); + spin_lock_bh(&rtwpci->irq_lock); + rtwpci->running = true; rtw_pci_enable_interrupt(rtwdev, rtwpci, false); spin_unlock_bh(&rtwpci->irq_lock); - rtw_pci_napi_start(rtwdev); - return 0; } static void rtw_pci_stop(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + spin_lock_bh(&rtwpci->irq_lock); + rtwpci->running = false; + rtw_pci_disable_interrupt(rtwdev, rtwpci); + spin_unlock_bh(&rtwpci->irq_lock); + + synchronize_irq(pdev->irq); rtw_pci_napi_stop(rtwdev); spin_lock_bh(&rtwpci->irq_lock); - rtw_pci_disable_interrupt(rtwdev, rtwpci); rtw_pci_dma_release(rtwdev, rtwpci); spin_unlock_bh(&rtwpci->irq_lock); } @@ -671,6 +678,8 @@ static u8 ac_to_hwq[] = { [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK, }; +static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); + static u8 rtw_hw_queue_mapping(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -727,6 +736,72 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev, rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; } +static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q) +{ + u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q]; + u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2); + + return FIELD_GET(TRX_BD_IDX_MASK, bd_idx); +} + +static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 
pci_q, bool drop) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; + u32 cur_rp; + u8 i; + + /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a + * bit dynamic, it's hard to define a reasonable fixed total timeout to + * use the read_poll_timeout* helpers. Instead, we can bound the number of + * polling iterations, so we just use a for loop with udelay here. + */ + for (i = 0; i < 30; i++) { + cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q); + if (cur_rp == ring->r.wp) + return; + + udelay(1); + } + + if (!drop) + rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q); +} + +static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, + bool drop) +{ + u8 q; + + for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) { + /* It may not be necessary to flush BCN and H2C tx queues. */ + if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C) + continue; + + if (pci_queues & BIT(q)) + __pci_flush_queue(rtwdev, q, drop); + } +} + +static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) +{ + u32 pci_queues = 0; + u8 i; + + /* If all of the hardware queues are requested to be flushed, + * flush all of the pci queues. + */ + if (queues == BIT(rtwdev->hw->queues) - 1) { + pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; + } else { + for (i = 0; i < rtwdev->hw->queues; i++) + if (queues & BIT(i)) + pci_queues |= BIT(ac_to_hwq[i]); + } + + __rtw_pci_flush_queues(rtwdev, pci_queues, drop); +} + static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; @@ -882,10 +957,12 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev, return ret; ring = &rtwpci->tx_rings[queue]; + spin_lock_bh(&rtwpci->irq_lock); if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); ring->queue_stopped = true; } + spin_unlock_bh(&rtwpci->irq_lock); return 0; } @@ -900,7 +977,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, struct sk_buff *skb; u32 count; u32 bd_idx_addr; - u32 bd_idx, cur_rp; + u32 bd_idx, cur_rp, rp_idx; u16 q_map; ring = &rtwpci->tx_rings[hw_queue]; @@ -909,6 +986,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, bd_idx = rtw_read32(rtwdev, bd_idx_addr); cur_rp = bd_idx >> 16; cur_rp &= TRX_BD_IDX_MASK; + rp_idx = ring->r.rp; if (cur_rp >= ring->r.rp) count = cur_rp - ring->r.rp; else @@ -932,12 +1010,15 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, } if (ring->queue_stopped && - avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) { + avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { q_map = skb_get_queue_mapping(skb); ieee80211_wake_queue(hw, q_map); ring->queue_stopped = false; } + if (++rp_idx >= ring->r.len) + rp_idx = 0; + skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); info = IEEE80211_SKB_CB(skb); @@ -1138,7 +1219,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) rtw_fw_c2h_cmd_isr(rtwdev); /* all of the jobs for this interrupt have been done */ - rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); + if (rtwpci->running) + rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); spin_unlock_bh(&rtwpci->irq_lock); return IRQ_HANDLED; @@ -1490,6 +1572,7 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) static struct rtw_hci_ops rtw_pci_ops = { .tx_write = rtw_pci_tx_write, .tx_kick_off = rtw_pci_tx_kick_off, + .flush_queues = rtw_pci_flush_queues,
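The __pci_flush_queue() comment above explains why the driver polls a fixed number of times instead of using a wall-clock timeout. A user-space sketch of that bounded-poll idea, where a counter stands in for the hardware read pointer:

/* Illustrative sketch of the bounded-poll idea in __pci_flush_queue(): poll a
 * hardware read pointer a fixed number of times because each read's latency
 * varies. The "hardware" here is just a counter that advances on every read.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_rp;			/* pretend hardware read pointer */
static uint32_t read_hw_rp(void)	/* stands in for the MMIO read */
{
	return ++hw_rp;			/* hardware "drains" one entry per poll */
}

static int flush_ring(uint32_t wp, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (read_hw_rp() == wp)
			return 0;	/* ring drained */
		/* real code: udelay(1); */
	}
	return -1;			/* caller may warn unless it asked to drop */
}

int main(void)
{
	if (flush_ring(10, 30) == 0)
		printf("ring flushed\n");
	else
		printf("flush timed out\n");
	return 0;
}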
.setup = rtw_pci_setup, .start = rtw_pci_start, .stop = rtw_pci_stop, @@ -1558,7 +1641,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); spin_lock_bh(&rtwpci->irq_lock); - rtw_pci_enable_interrupt(rtwdev, rtwpci, false); + if (rtwpci->running) + rtw_pci_enable_interrupt(rtwdev, rtwpci, false); spin_unlock_bh(&rtwpci->irq_lock); /* When ISR happens during polling and before napi_complete * while no further data is received. Data on the dma_ring will diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index e76fc549a788..0ffae887527a 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -211,6 +211,7 @@ struct rtw_pci { spinlock_t irq_lock; u32 irq_mask[4]; bool irq_enabled; + bool running; /* napi structure */ struct net_device netdev; diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index e114ddecac09..8146acaf1893 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -119,6 +119,14 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) dm_info->cck_fa_avg = CCK_FA_AVG_RESET; } +static void rtw_phy_cfo_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfo_init) + chip->ops->cfo_init(rtwdev); +} + void rtw_phy_init(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; @@ -140,6 +148,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev) rtw_phy_cck_pd_init(rtwdev); dm_info->iqk.done = false; + rtw_phy_cfo_init(rtwdev); } EXPORT_SYMBOL(rtw_phy_init); @@ -316,7 +325,8 @@ rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info) return damping; } -static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info, +static void rtw_phy_dig_get_boundary(struct rtw_dev *rtwdev, + struct rtw_dm_info *dm_info, u8 *upper, u8 *lower, bool linked) { u8 dig_max, dig_min, dig_mid; @@ -325,8 +335,7 @@ static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info, if (linked) { dig_max = DIG_PERF_MAX; dig_mid = DIG_PERF_MID; - /* 22B=0x1c, 22C=0x20 */ - dig_min = 0x1c; + dig_min = rtwdev->chip->dig_min; min_rssi = max_t(u8, dm_info->min_rssi, dig_min); } else { dig_max = DIG_CVRG_MAX; @@ -437,7 +446,8 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev) * the peers connected with us, meanwhile make sure the igi value does * not beyond the hardware limitation */ - rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked); + rtw_phy_dig_get_boundary(rtwdev, dm_info, &upper_bound, &lower_bound, + linked); cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound); /* record current igi value and false alarm statistics for further @@ -527,6 +537,62 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) chip->ops->dpk_track(rtwdev); } +struct rtw_rx_addr_match_data { + struct rtw_dev *rtwdev; + struct ieee80211_hdr *hdr; + struct rtw_rx_pkt_stat *pkt_stat; + u8 *bssid; +}; + +static void rtw_phy_parsing_cfo_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_rx_addr_match_data *iter_data = data; + struct rtw_dev *rtwdev = iter_data->rtwdev; + struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + u8 *bssid = iter_data->bssid; + u8 i; + + if (!ether_addr_equal(vif->bss_conf.bssid, bssid)) + return; + + for (i = 0; i < rtwdev->hal.rf_path_num; i++) { + 
cfo->cfo_tail[i] += pkt_stat->cfo_tail[i]; + cfo->cfo_cnt[i]++; + } + + cfo->packet_count++; +} + +void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct ieee80211_hdr *hdr = pkt_stat->hdr; + struct rtw_rx_addr_match_data data = {}; + + if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status || + ieee80211_is_ctl(hdr->frame_control)) + return; + + data.rtwdev = rtwdev; + data.hdr = hdr; + data.pkt_stat = pkt_stat; + data.bssid = get_hdr_bssid(hdr); + + rtw_iterate_vifs_atomic(rtwdev, rtw_phy_parsing_cfo_iter, &data); +} +EXPORT_SYMBOL(rtw_phy_parsing_cfo); + +static void rtw_phy_cfo_track(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfo_track) + chip->ops->cfo_track(rtwdev); +} + #define CCK_PD_FA_LV1_MIN 1000 #define CCK_PD_FA_LV0_MAX 500 @@ -617,6 +683,7 @@ static void rtw_phy_pwr_track(struct rtw_dev *rtwdev) static void rtw_phy_ra_track(struct rtw_dev *rtwdev) { + rtw_fw_update_wl_phy_info(rtwdev); rtw_phy_ra_info_update(rtwdev); rtw_phy_rrsr_update(rtwdev); } @@ -628,6 +695,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) rtw_phy_dig(rtwdev); rtw_phy_cck_pd(rtwdev); rtw_phy_ra_track(rtwdev); + rtw_phy_cfo_track(rtwdev); rtw_phy_dpk_track(rtwdev); rtw_phy_pwr_track(rtwdev); } @@ -1584,7 +1652,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev) } EXPORT_SYMBOL(rtw_phy_load_tables); -static u8 rtw_get_channel_group(u8 channel) +static u8 rtw_get_channel_group(u8 channel, u8 rate) { switch (channel) { default: @@ -1628,6 +1696,7 @@ static u8 rtw_get_channel_group(u8 channel) case 106: return 4; case 14: + return rate <= DESC_RATE11M ? 5 : 4; case 108: case 110: case 112: @@ -1879,7 +1948,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, s8 *remnant = &pwr_param->pwr_remnant; pwr_idx = &rtwdev->efuse.txpwr_idx_table[path]; - group = rtw_get_channel_group(ch); + group = rtw_get_channel_group(ch, rate); /* base power index for 2.4G/5G */ if (IS_CH_2G_BAND(ch)) { @@ -2219,6 +2288,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, } EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx); +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 delta_lck; + + delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck); + if (delta_lck >= rtwdev->chip->lck_threshold) { + dm_info->thermal_meter_lck = dm_info->thermal_avg[0]; + return true; + } + return false; +} +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck); + bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index a4fcfb878550..0b6f2fc8193c 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -55,9 +55,12 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path); s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table, u8 tbl_path, u8 therm_path, u8 delta); +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev); bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev); void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table); +void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev, + struct rtw_rx_pkt_stat *pkt_stat); struct rtw_txpwr_lmt_cfg_pair { u8 regd; diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 
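rtw_phy_pwrtrack_need_lck() above arms a new LC calibration once the averaged thermal reading drifts by at least the chip's lck_threshold from the value recorded at the previous LCK. A standalone sketch of that trigger; the threshold and readings below are invented for illustration:

/* Illustrative sketch of the LCK trigger: re-run LC calibration when the
 * averaged thermal value drifts by at least lck_threshold from the reading
 * taken at the previous LCK. User-space model only.
 */
#include <stdio.h>
#include <stdlib.h>

struct tracker {
	unsigned char thermal_avg;		/* current averaged thermal meter */
	unsigned char thermal_meter_lck;	/* thermal value at the last LCK */
	unsigned char lck_threshold;		/* per-chip trigger delta */
};

static int need_lck(struct tracker *t)
{
	unsigned char delta = abs(t->thermal_avg - t->thermal_meter_lck);

	if (delta >= t->lck_threshold) {
		t->thermal_meter_lck = t->thermal_avg;	/* rearm at new baseline */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct tracker t = { .thermal_avg = 40, .thermal_meter_lck = 32,
			     .lck_threshold = 8 };	/* example values */

	printf("need LCK: %d\n", need_lck(&t));	/* delta 8 >= 8 -> 1 */
	printf("need LCK: %d\n", need_lck(&t));	/* delta now 0   -> 0 */
	return 0;
}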
ea518aa78552..f5ce75095e90 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -129,6 +129,9 @@ #define REG_MCU_TST_CFG 0x84 #define VAL_FW_TRIGGER 0x1 +#define REG_PMC_DBG_CTRL1 0xa8 +#define BITS_PMC_BT_IQK_STS GENMASK(22, 21) + #define REG_EFUSE_ACCESS 0x00CF #define EFUSE_ACCESS_ON 0x69 #define EFUSE_ACCESS_OFF 0x00 @@ -360,6 +363,7 @@ #define REG_TX_PTCL_CTRL 0x0520 #define BIT_SIFS_BK_EN BIT(12) #define REG_TXPAUSE 0x0522 +#define BIT_AC_QUEUE GENMASK(7, 0) #define REG_RD_CTRL 0x0524 #define BIT_DIS_TXOP_CFE BIT(10) #define BIT_DIS_LSIG_CFE BIT(9) @@ -516,6 +520,7 @@ #define BIT_RFE_BUF_EN BIT(3) #define REG_ANAPAR_XTAL_0 0x1040 +#define BIT_XCAP_0 GENMASK(23, 10) #define REG_CPU_DMEM_CON 0x1080 #define BIT_WL_PLATFORM_RST BIT(16) #define BIT_WL_SECURITY_CLK BIT(15) @@ -534,6 +539,7 @@ #define BIT_DDMACH0_OWN BIT(31) #define BIT_DDMACH0_CHKSUM_EN BIT(29) #define BIT_DDMACH0_CHKSUM_STS BIT(27) +#define BIT_DDMACH0_DDMA_MODE BIT(26) #define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25) #define BIT_DDMACH0_CHKSUM_CONT BIT(24) #define BIT_MASK_DDMACH0_DLEN 0x3ffff @@ -642,21 +648,30 @@ #define RF_WLSEL 0x02 #define RF_DTXLOK 0x08 #define RF_CFGCH 0x18 +#define BIT_BAND GENMASK(18, 16) #define RF_RCK 0x1d #define RF_LUTWA 0x33 #define RF_LUTWD1 0x3e #define RF_LUTWD0 0x3f +#define BIT_GAIN_EXT BIT(12) +#define BIT_DATA_L GENMASK(11, 0) #define RF_T_METER 0x42 #define RF_BSPAD 0x54 #define RF_GAINTX 0x56 #define RF_TXATANK 0x64 #define RF_TRXIQ 0x66 #define RF_RXIQGEN 0x8d +#define RF_SYN_PFD 0xb0 #define RF_XTALX2 0xb8 +#define RF_SYN_CTRL 0xbb #define RF_MALSEL 0xbe +#define RF_SYN_AAC 0xc9 +#define RF_AAC_CTRL 0xca +#define RF_FAST_LCK 0xcc #define RF_RCKD 0xde #define RF_TXADBG 0xde #define RF_LUTDBG 0xdf +#define BIT_TXA_TANK BIT(4) #define RF_LUTWE2 0xee #define RF_LUTWE 0xef diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 33c6cf1206c8..785b8181513f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -581,7 +581,8 @@ static void rtw8821c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc); pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc); pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc); - pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc); + pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) && + GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE; pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc); pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc); pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index dd560c28abb2..6cb593cc33c2 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -17,6 +17,7 @@ #include "util.h" #include "bf.h" #include "efuse.h" +#include "coex.h" #define IQK_DONE_8822C 0xaa @@ -39,7 +40,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) efuse->rfe_option = map->rfe_option; efuse->rf_board_option = map->rf_board_option; - efuse->crystal_cap = map->xtal_k; + efuse->crystal_cap = map->xtal_k & XCAP_MASK; efuse->channel_plan = map->channel_plan; efuse->country_code[0] = map->country_code[0]; efuse->country_code[1] = map->country_code[1]; @@ -1094,14 +1095,719 @@ static void rtw8822c_pa_bias(struct rtw_dev *rtwdev) if (pg_pa_bias == EFUSE_READ_FAIL) return; pg_pa_bias 
= FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias); - rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_2G_MASK, pg_pa_bias); + rtw_write_rf(rtwdev, path, RF_PA, RF_PABIAS_2G_MASK, pg_pa_bias); } for (path = 0; path < rtwdev->hal.rf_path_num; path++) { rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path], &pg_pa_bias); pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias); - rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_5G_MASK, pg_pa_bias); + rtw_write_rf(rtwdev, path, RF_PA, RF_PABIAS_5G_MASK, pg_pa_bias); + } +} + +static void rtw8822c_rfk_handshake(struct rtw_dev *rtwdev, bool is_before_k) +{ + struct rtw_dm_info *dm = &rtwdev->dm_info; + u8 u1b_tmp; + u8 u4b_tmp; + int ret; + + if (is_before_k) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[RFK] WiFi / BT RFK handshake start!!\n"); + + if (!dm->is_bt_iqk_timeout) { + ret = read_poll_timeout(rtw_read32_mask, u4b_tmp, + u4b_tmp == 0, 20, 600000, false, + rtwdev, REG_PMC_DBG_CTRL1, + BITS_PMC_BT_IQK_STS); + if (ret) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[RFK] Wait BT IQK finish timeout!!\n"); + dm->is_bt_iqk_timeout = true; + } + } + + rtw_fw_inform_rfk_status(rtwdev, true); + + ret = read_poll_timeout(rtw_read8_mask, u1b_tmp, + u1b_tmp == 1, 20, 100000, false, + rtwdev, REG_ARFR4, BIT_WL_RFK); + if (ret) + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[RFK] Send WiFi RFK start H2C cmd FAIL!!\n"); + } else { + rtw_fw_inform_rfk_status(rtwdev, false); + ret = read_poll_timeout(rtw_read8_mask, u1b_tmp, + u1b_tmp == 1, 20, 100000, false, + rtwdev, REG_ARFR4, + BIT_WL_RFK); + if (ret) + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[RFK] Send WiFi RFK finish H2C cmd FAIL!!\n"); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[RFK] WiFi / BT RFK handshake finish!!\n"); + } +} + +static void rtw8822c_rfk_power_save(struct rtw_dev *rtwdev, + bool is_power_save) +{ + u8 path; + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0, BIT_PS_EN, + is_power_save ? 
0 : 1); + } +} + +static void rtw8822c_txgapk_backup_bb_reg(struct rtw_dev *rtwdev, const u32 reg[], + u32 reg_backup[], u32 reg_num) +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + reg_backup[i] = rtw_read32(rtwdev, reg[i]); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Backup BB 0x%x = 0x%x\n", + reg[i], reg_backup[i]); + } +} + +static void rtw8822c_txgapk_reload_bb_reg(struct rtw_dev *rtwdev, + const u32 reg[], u32 reg_backup[], + u32 reg_num) +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + rtw_write32(rtwdev, reg[i], reg_backup[i]); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Reload BB 0x%x = 0x%x\n", + reg[i], reg_backup[i]); + } +} + +static bool check_rf_status(struct rtw_dev *rtwdev, u8 status) +{ + u8 reg_rf0_a, reg_rf0_b; + + reg_rf0_a = (u8)rtw_read_rf(rtwdev, RF_PATH_A, + RF_MODE_TRXAGC, BIT_RF_MODE); + reg_rf0_b = (u8)rtw_read_rf(rtwdev, RF_PATH_B, + RF_MODE_TRXAGC, BIT_RF_MODE); + + if (reg_rf0_a == status || reg_rf0_b == status) + return false; + + return true; +} + +static void rtw8822c_txgapk_tx_pause(struct rtw_dev *rtwdev) +{ + bool status; + int ret; + + rtw_write8(rtwdev, REG_TXPAUSE, BIT_AC_QUEUE); + rtw_write32_mask(rtwdev, REG_TX_FIFO, BIT_STOP_TX, 0x2); + + ret = read_poll_timeout_atomic(check_rf_status, status, status, + 2, 5000, false, rtwdev, 2); + if (ret) + rtw_warn(rtwdev, "failed to pause TX\n"); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Tx pause!!\n"); +} + +static void rtw8822c_txgapk_bb_dpk(struct rtw_dev *rtwdev, u8 path) +{ + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + rtw_write32_mask(rtwdev, REG_ENFN, BIT_IQK_DPK_EN, 0x1); + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, + BIT_IQK_DPK_CLOCK_SRC, 0x1); + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, + BIT_IQK_DPK_RESET_SRC, 0x1); + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, BIT_EN_IOQ_IQK_DPK, 0x1); + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, BIT_TST_IQK2SET_SRC, 0x0); + rtw_write32_mask(rtwdev, REG_CCA_OFF, BIT_CCA_ON_BY_PW, 0x1ff); + + if (path == RF_PATH_A) { + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_A, + BIT_RFTXEN_GCK_FORCE_ON, 0x1); + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_DIS_SHARERX_TXGAT, 0x1); + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_A, + BIT_TX_SCALE_0DB, 0x1); + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_3WIRE_EN, 0x0); + } else if (path == RF_PATH_B) { + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_B, + BIT_RFTXEN_GCK_FORCE_ON, 0x1); + rtw_write32_mask(rtwdev, REG_3WIRE2, + BIT_DIS_SHARERX_TXGAT, 0x1); + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_B, + BIT_TX_SCALE_0DB, 0x1); + rtw_write32_mask(rtwdev, REG_3WIRE2, BIT_3WIRE_EN, 0x0); + } + rtw_write32_mask(rtwdev, REG_CCKSB, BIT_BBMODE, 0x2); +} + +static void rtw8822c_txgapk_afe_dpk(struct rtw_dev *rtwdev, u8 path) +{ + u32 reg; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + if (path == RF_PATH_A) { + reg = REG_ANAPAR_A; + } else if (path == RF_PATH_B) { + reg = REG_ANAPAR_B; + } else { + rtw_err(rtwdev, "[TXGAPK] unknown path %d!!\n", path); + return; + } + + rtw_write32_mask(rtwdev, REG_IQK_CTRL, MASKDWORD, MASKDWORD); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x701f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x702f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x703f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x704f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x705f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x706f0001); + rtw_write32_mask(rtwdev, reg, 
MASKDWORD, 0x707f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x708f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x709f0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70af0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70bf0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70cf0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70df0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ef0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ff0001); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ff0001); +} + +static void rtw8822c_txgapk_afe_dpk_restore(struct rtw_dev *rtwdev, u8 path) +{ + u32 reg; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + if (path == RF_PATH_A) { + reg = REG_ANAPAR_A; + } else if (path == RF_PATH_B) { + reg = REG_ANAPAR_B; + } else { + rtw_err(rtwdev, "[TXGAPK] unknown path %d!!\n", path); + return; + } + rtw_write32_mask(rtwdev, REG_IQK_CTRL, MASKDWORD, 0xffa1005e); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700b8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70144041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70244041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70344041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70444041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x705b8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70644041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x707b8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x708b8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x709b8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ab8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70bb8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70cb8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70db8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70eb8041); + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70fb8041); +} + +static void rtw8822c_txgapk_bb_dpk_restore(struct rtw_dev *rtwdev, u8 path) +{ + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x0); + rtw_write_rf(rtwdev, path, RF_DIS_BYPASS_TXBB, BIT_TIA_BYPASS, 0x0); + rtw_write_rf(rtwdev, path, RF_DIS_BYPASS_TXBB, BIT_TXBB, 0x0); + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x0); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); + rtw_write32_mask(rtwdev, REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, 0x00); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x1); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); + rtw_write32_mask(rtwdev, REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, 0x00); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x0); + rtw_write32_mask(rtwdev, REG_CCA_OFF, BIT_CCA_ON_BY_PW, 0x0); + + if (path == RF_PATH_A) { + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_A, + BIT_RFTXEN_GCK_FORCE_ON, 0x0); + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_DIS_SHARERX_TXGAT, 0x0); + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_A, + BIT_TX_SCALE_0DB, 0x0); + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_3WIRE_EN, 0x3); + } else if (path == RF_PATH_B) { + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_B, + BIT_RFTXEN_GCK_FORCE_ON, 0x0); + rtw_write32_mask(rtwdev, REG_3WIRE2, + BIT_DIS_SHARERX_TXGAT, 0x0); + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_B, + BIT_TX_SCALE_0DB, 0x0); + rtw_write32_mask(rtwdev, REG_3WIRE2, BIT_3WIRE_EN, 0x3); + } + + rtw_write32_mask(rtwdev, REG_CCKSB, BIT_BBMODE, 0x0); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_CFIR_EN, 
0x5); +} + +static bool _rtw8822c_txgapk_gain_valid(struct rtw_dev *rtwdev, u32 gain) +{ + if ((FIELD_GET(BIT_GAIN_TX_PAD_H, gain) >= 0xc) && + (FIELD_GET(BIT_GAIN_TX_PAD_L, gain) >= 0xe)) + return true; + + return false; +} + +static void _rtw8822c_txgapk_write_gain_bb_table(struct rtw_dev *rtwdev, + u8 band, u8 path) +{ + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + u32 v, tmp_3f = 0; + u8 gain, check_txgain; + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); + + switch (band) { + case RF_BAND_2G_OFDM: + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x0); + break; + case RF_BAND_5G_L: + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x2); + break; + case RF_BAND_5G_M: + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x3); + break; + case RF_BAND_5G_H: + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x4); + break; + default: + break; + } + + rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, MASKBYTE0, 0x88); + + check_txgain = 0; + for (gain = 0; gain < RF_GAIN_NUM; gain++) { + v = txgapk->rf3f_bp[band][gain][path]; + if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) { + if (!check_txgain) { + tmp_3f = txgapk->rf3f_bp[band][gain][path]; + check_txgain = 1; + } + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] tx_gain=0x%03X >= 0xCEX\n", + txgapk->rf3f_bp[band][gain][path]); + } else { + tmp_3f = txgapk->rf3f_bp[band][gain][path]; + } + + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN, tmp_3f); + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_I_GAIN, gain); + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_GAIN_RST, 0x1); + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_GAIN_RST, 0x0); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] Band=%d 0x1b98[11:0]=0x%03X path=%d\n", + band, tmp_3f, path); + } +} + +static void rtw8822c_txgapk_write_gain_bb_table(struct rtw_dev *rtwdev) +{ + u8 path, band; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s channel=%d\n", + __func__, rtwdev->dm_info.gapk.channel); + + for (band = 0; band < RF_BAND_MAX; band++) { + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + _rtw8822c_txgapk_write_gain_bb_table(rtwdev, + band, path); + } + } +} + +static void rtw8822c_txgapk_read_offset(struct rtw_dev *rtwdev, u8 path) +{ + static const u32 cfg1_1b00[2] = {0x00000d18, 0x00000d2a}; + static const u32 cfg2_1b00[2] = {0x00000d19, 0x00000d2b}; + static const u32 set_pi[2] = {REG_RSV_CTRL, REG_WLRF1}; + static const u32 path_setting[2] = {REG_ORITXCODE, REG_ORITXCODE2}; + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + u8 channel = txgapk->channel; + u32 val; + int i; + + if (path >= ARRAY_SIZE(cfg1_1b00) || + path >= ARRAY_SIZE(cfg2_1b00) || + path >= ARRAY_SIZE(set_pi) || + path >= ARRAY_SIZE(path_setting)) { + rtw_warn(rtwdev, "[TXGAPK] wrong path %d\n", path); + return; + } + + rtw_write32_mask(rtwdev, REG_ANTMAP0, BIT_ANT_PATH, path + 1); + rtw_write32_mask(rtwdev, REG_TXLGMAP, MASKDWORD, 0xe4e40000); + rtw_write32_mask(rtwdev, REG_TXANTSEG, BIT_ANTSEG, 0x3); + rtw_write32_mask(rtwdev, path_setting[path], MASK20BITS, 0x33312); + rtw_write32_mask(rtwdev, path_setting[path], BIT_PATH_EN, 0x1); + rtw_write32_mask(rtwdev, set_pi[path], BITS_RFC_DIRECT, 0x0); + rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT_TXA_TANK, 0x1); + rtw_write_rf(rtwdev, path, RF_IDAC, BIT_TX_MODE, 0x820); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); + rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x0); + + rtw_write32_mask(rtwdev, REG_TX_TONE_IDX, MASKBYTE0, 0x018); + fsleep(1000); + if (channel >= 1 && channel <= 14) + 
rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, BIT_2G_SWING); + else + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, BIT_5G_SWING); + fsleep(1000); + + rtw_write32_mask(rtwdev, REG_NCTL0, MASKDWORD, cfg1_1b00[path]); + rtw_write32_mask(rtwdev, REG_NCTL0, MASKDWORD, cfg2_1b00[path]); + + read_poll_timeout(rtw_read32_mask, val, + val == 0x55, 1000, 100000, false, + rtwdev, REG_RPT_CIP, BIT_RPT_CIP_STATUS); + + rtw_write32_mask(rtwdev, set_pi[path], BITS_RFC_DIRECT, 0x2); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_EN, 0x1); + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_SEL, 0x12); + rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, BIT_GAPK_RPT_IDX, 0x3); + val = rtw_read32(rtwdev, REG_STAT_RPT); + + txgapk->offset[0][path] = (s8)FIELD_GET(BIT_GAPK_RPT0, val); + txgapk->offset[1][path] = (s8)FIELD_GET(BIT_GAPK_RPT1, val); + txgapk->offset[2][path] = (s8)FIELD_GET(BIT_GAPK_RPT2, val); + txgapk->offset[3][path] = (s8)FIELD_GET(BIT_GAPK_RPT3, val); + txgapk->offset[4][path] = (s8)FIELD_GET(BIT_GAPK_RPT4, val); + txgapk->offset[5][path] = (s8)FIELD_GET(BIT_GAPK_RPT5, val); + txgapk->offset[6][path] = (s8)FIELD_GET(BIT_GAPK_RPT6, val); + txgapk->offset[7][path] = (s8)FIELD_GET(BIT_GAPK_RPT7, val); + + rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, BIT_GAPK_RPT_IDX, 0x4); + val = rtw_read32(rtwdev, REG_STAT_RPT); + + txgapk->offset[8][path] = (s8)FIELD_GET(BIT_GAPK_RPT0, val); + txgapk->offset[9][path] = (s8)FIELD_GET(BIT_GAPK_RPT1, val); + + for (i = 0; i < RF_HW_OFFSET_NUM; i++) + if (txgapk->offset[i][path] & BIT(3)) + txgapk->offset[i][path] = txgapk->offset[i][path] | + 0xf0; + for (i = 0; i < RF_HW_OFFSET_NUM; i++) + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] offset %d %d path=%d\n", + txgapk->offset[i][path], i, path); +} + +static void rtw8822c_txgapk_calculate_offset(struct rtw_dev *rtwdev, u8 path) +{ + static const u32 bb_reg[] = {REG_ANTMAP0, REG_TXLGMAP, REG_TXANTSEG, + REG_ORITXCODE, REG_ORITXCODE2}; + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + u8 channel = txgapk->channel; + u32 reg_backup[ARRAY_SIZE(bb_reg)] = {0}; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s channel=%d\n", + __func__, channel); + + rtw8822c_txgapk_backup_bb_reg(rtwdev, bb_reg, + reg_backup, ARRAY_SIZE(bb_reg)); + + if (channel >= 1 && channel <= 14) { + rtw_write32_mask(rtwdev, + REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x3f); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x5000f); + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_RF_GAIN, 0x0); + rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x1); + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RXAGC, 0x0f); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1); + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_TXBB, 0x1); + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0); + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x1); + + rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x00); + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x0); + + rtw8822c_txgapk_read_offset(rtwdev, path); + rtw_dbg(rtwdev, RTW_DBG_RFK, "=============================\n"); + + } else { + rtw_write32_mask(rtwdev, + REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 
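The loop at the end of rtw8822c_txgapk_read_offset() above sign-extends each 4-bit offset reported by hardware: when bit 3 is set the value is negative, so the upper bits are filled with ones to form a proper s8. The same extension as a standalone example:

/* Illustrative sketch of the 4-bit sign extension applied to the TXGAPK
 * offsets: hardware reports each offset as a 4-bit two's-complement field.
 */
#include <stdint.h>
#include <stdio.h>

static int8_t extend_gapk_offset(uint8_t raw4)
{
	int8_t off = raw4 & 0x0f;

	if (off & 0x08)		/* bit 3 set -> negative in 4-bit two's complement */
		off |= 0xf0;	/* fill the upper nibble with ones */
	return off;
}

int main(void)
{
	printf("0x3 -> %d\n", extend_gapk_offset(0x3));	/*  3 */
	printf("0xe -> %d\n", extend_gapk_offset(0xe));	/* -2 */
	return 0;
}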
path); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x3f); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x50011); + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x3); + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_ATT, 0x3); + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_SW, 0x1); + rtw_write_rf(rtwdev, path, + RF_RXA_MIX_GAIN, BIT_RXA_MIX_GAIN, 0x2); + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RXAGC, 0x12); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1); + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0); + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x1); + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RF_MODE, 0x5); + + rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x0); + + if (channel >= 36 && channel <= 64) + rtw_write32_mask(rtwdev, + REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x2); + else if (channel >= 100 && channel <= 144) + rtw_write32_mask(rtwdev, + REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x3); + else if (channel >= 149 && channel <= 177) + rtw_write32_mask(rtwdev, + REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x4); + + rtw8822c_txgapk_read_offset(rtwdev, path); + rtw_dbg(rtwdev, RTW_DBG_RFK, "=============================\n"); } + rtw8822c_txgapk_reload_bb_reg(rtwdev, bb_reg, + reg_backup, ARRAY_SIZE(bb_reg)); +} + +static void rtw8822c_txgapk_rf_restore(struct rtw_dev *rtwdev, u8 path) +{ + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + if (path >= rtwdev->hal.rf_path_num) + return; + + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RF_MODE, 0x3); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x0); + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x0); +} + +static u32 rtw8822c_txgapk_cal_gain(struct rtw_dev *rtwdev, u32 gain, s8 offset) +{ + u32 gain_x2, new_gain; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + if (_rtw8822c_txgapk_gain_valid(rtwdev, gain)) { + new_gain = gain; + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] gain=0x%03X(>=0xCEX) offset=%d new_gain=0x%03X\n", + gain, offset, new_gain); + return new_gain; + } + + gain_x2 = (gain << 1) + offset; + new_gain = (gain_x2 >> 1) | (gain_x2 & BIT(0) ? 
BIT_GAIN_EXT : 0); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] gain=0x%X offset=%d new_gain=0x%X\n", + gain, offset, new_gain); + + return new_gain; +} + +static void rtw8822c_txgapk_write_tx_gain(struct rtw_dev *rtwdev) +{ + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + u32 i, j, tmp = 0x20, tmp_3f, v; + s8 offset_tmp[RF_GAIN_NUM] = {0}; + u8 path, band = RF_BAND_2G_OFDM, channel = txgapk->channel; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + if (channel >= 1 && channel <= 14) { + tmp = 0x20; + band = RF_BAND_2G_OFDM; + } else if (channel >= 36 && channel <= 64) { + tmp = 0x200; + band = RF_BAND_5G_L; + } else if (channel >= 100 && channel <= 144) { + tmp = 0x280; + band = RF_BAND_5G_M; + } else if (channel >= 149 && channel <= 177) { + tmp = 0x300; + band = RF_BAND_5G_H; + } else { + rtw_err(rtwdev, "[TXGAPK] unknown channel %d!!\n", channel); + return; + } + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + for (i = 0; i < RF_GAIN_NUM; i++) { + offset_tmp[i] = 0; + for (j = i; j < RF_GAIN_NUM; j++) { + v = txgapk->rf3f_bp[band][j][path]; + if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) + continue; + + offset_tmp[i] += txgapk->offset[j][path]; + txgapk->fianl_offset[i][path] = offset_tmp[i]; + } + + v = txgapk->rf3f_bp[band][i][path]; + if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] tx_gain=0x%03X >= 0xCEX\n", + txgapk->rf3f_bp[band][i][path]); + } else { + txgapk->rf3f_fs[path][i] = offset_tmp[i]; + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] offset %d %d\n", + offset_tmp[i], i); + } + } + + rtw_write_rf(rtwdev, path, RF_LUTWE2, RFREG_MASK, 0x10000); + for (i = 0; i < RF_GAIN_NUM; i++) { + rtw_write_rf(rtwdev, path, + RF_LUTWA, RFREG_MASK, tmp + i); + + tmp_3f = rtw8822c_txgapk_cal_gain(rtwdev, + txgapk->rf3f_bp[band][i][path], + offset_tmp[i]); + rtw_write_rf(rtwdev, path, RF_LUTWD0, + BIT_GAIN_EXT | BIT_DATA_L, tmp_3f); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] 0x33=0x%05X 0x3f=0x%04X\n", + tmp + i, tmp_3f); + } + rtw_write_rf(rtwdev, path, RF_LUTWE2, RFREG_MASK, 0x0); + } +} + +static void rtw8822c_txgapk_save_all_tx_gain_table(struct rtw_dev *rtwdev) +{ + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + static const u32 three_wire[2] = {REG_3WIRE, REG_3WIRE2}; + static const u8 ch_num[RF_BAND_MAX] = {1, 1, 36, 100, 149}; + static const u8 band_num[RF_BAND_MAX] = {0x0, 0x0, 0x1, 0x3, 0x5}; + static const u8 cck[RF_BAND_MAX] = {0x1, 0x0, 0x0, 0x0, 0x0}; + u8 path, band, gain, rf0_idx; + u32 rf18, v; + + if (rtwdev->dm_info.dm_flags & BIT(RTW_DM_CAP_TXGAPK)) + return; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + if (txgapk->read_txgain == 1) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] Already Read txgapk->read_txgain return!!!\n"); + rtw8822c_txgapk_write_gain_bb_table(rtwdev); + return; + } + + for (band = 0; band < RF_BAND_MAX; band++) { + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + rf18 = rtw_read_rf(rtwdev, path, RF_CFGCH, RFREG_MASK); + + rtw_write32_mask(rtwdev, + three_wire[path], BIT_3WIRE_EN, 0x0); + rtw_write_rf(rtwdev, path, + RF_CFGCH, MASKBYTE0, ch_num[band]); + rtw_write_rf(rtwdev, path, + RF_CFGCH, BIT_BAND, band_num[band]); + rtw_write_rf(rtwdev, path, + RF_BW_TRXBB, BIT_DBG_CCK_CCA, cck[band]); + rtw_write_rf(rtwdev, path, + RF_BW_TRXBB, BIT_TX_CCK_IND, cck[band]); + gain = 0; + for (rf0_idx = 1; rf0_idx < 32; rf0_idx += 3) { + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, + MASKBYTE0, rf0_idx); + v = rtw_read_rf(rtwdev, path, + 
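The arithmetic in rtw8822c_txgapk_cal_gain() above is worth spelling out: the signed offset is applied at half-step resolution by doubling the gain, adding the offset and halving again, and an odd result carries the leftover half step in the gain-extension bit (BIT_GAIN_EXT, bit 12). A worked standalone example:

/* Illustrative sketch of the TXGAPK gain adjustment: apply a half-step offset
 * and carry an odd half step in the gain-extension bit. Standalone model, not
 * the RF write path.
 */
#include <stdint.h>
#include <stdio.h>

#define GAIN_EXT (1u << 12)	/* same position as BIT_GAIN_EXT above */

static uint32_t cal_gain(uint32_t gain, int8_t offset)
{
	uint32_t gain_x2 = (gain << 1) + offset;	/* work in half steps */
	uint32_t new_gain = gain_x2 >> 1;		/* back to full steps */

	if (gain_x2 & 1)				/* odd -> half step left over */
		new_gain |= GAIN_EXT;
	return new_gain;
}

int main(void)
{
	/* gain 0x2c with offset -3: 0x58 - 3 = 0x55, i.e. 0x2a full steps plus
	 * the half-step flag */
	printf("new gain = 0x%x\n", cal_gain(0x2c, -3));	/* 0x102a */
	return 0;
}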
RF_TX_RESULT, RFREG_MASK); + txgapk->rf3f_bp[band][gain][path] = v & BIT_DATA_L; + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] 0x5f=0x%03X band=%d path=%d\n", + txgapk->rf3f_bp[band][gain][path], + band, path); + gain++; + } + rtw_write_rf(rtwdev, path, RF_CFGCH, RFREG_MASK, rf18); + rtw_write32_mask(rtwdev, + three_wire[path], BIT_3WIRE_EN, 0x3); + } + } + rtw8822c_txgapk_write_gain_bb_table(rtwdev); + txgapk->read_txgain = 1; +} + +static void rtw8822c_txgapk(struct rtw_dev *rtwdev) +{ + static const u32 bb_reg[2] = {REG_TX_PTCL_CTRL, REG_TX_FIFO}; + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + u32 bb_reg_backup[2]; + u8 path; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); + + rtw8822c_txgapk_save_all_tx_gain_table(rtwdev); + + if (txgapk->read_txgain == 0) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] txgapk->read_txgain == 0 return!!!\n"); + return; + } + + if (rtwdev->efuse.power_track_type >= 4 && + rtwdev->efuse.power_track_type <= 7) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[TXGAPK] Normal Mode in TSSI mode. return!!!\n"); + return; + } + + rtw8822c_txgapk_backup_bb_reg(rtwdev, bb_reg, + bb_reg_backup, ARRAY_SIZE(bb_reg)); + rtw8822c_txgapk_tx_pause(rtwdev); + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + txgapk->channel = rtw_read_rf(rtwdev, path, + RF_CFGCH, RFREG_MASK) & MASKBYTE0; + rtw8822c_txgapk_bb_dpk(rtwdev, path); + rtw8822c_txgapk_afe_dpk(rtwdev, path); + rtw8822c_txgapk_calculate_offset(rtwdev, path); + rtw8822c_txgapk_rf_restore(rtwdev, path); + rtw8822c_txgapk_afe_dpk_restore(rtwdev, path); + rtw8822c_txgapk_bb_dpk_restore(rtwdev, path); + } + rtw8822c_txgapk_write_tx_gain(rtwdev); + rtw8822c_txgapk_reload_bb_reg(rtwdev, bb_reg, + bb_reg_backup, ARRAY_SIZE(bb_reg)); +} + +static void rtw8822c_do_gapk(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm = &rtwdev->dm_info; + + if (dm->dm_flags & BIT(RTW_DM_CAP_TXGAPK)) { + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] feature disable!!!\n"); + return; + } + rtw8822c_rfk_handshake(rtwdev, true); + rtw8822c_txgapk(rtwdev); + rtw8822c_rfk_handshake(rtwdev, false); } static void rtw8822c_rf_init(struct rtw_dev *rtwdev) @@ -1126,6 +1832,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev) dm_info->pwr_trk_triggered = false; dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; + dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k; } static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) @@ -1396,6 +2103,15 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev) return 0; } +static void rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev) +{ + rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw8822c reg_"); + rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw8822c DMEM_"); + rtw_dump_fw(rtwdev, OCPBASE_IMEM_88XX, 0x10000, "rtw8822c IMEM_"); + rtw_dump_fw(rtwdev, OCPBASE_EMEM_88XX, 0x20000, "rtw8822c EMEM_"); + rtw_dump_fw(rtwdev, OCPBASE_ROM_88XX, 0x10000, "rtw8822c ROM_"); +} + static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable) { if (enable) { @@ -1856,6 +2572,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, } dm_info->rx_evm_dbm[path] = evm_dbm; } + rtw_phy_parsing_cfo(rtwdev, pkt_stat); } static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, @@ -1911,6 +2628,7 @@ static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift + pkt_stat->drv_info_sz); + pkt_stat->hdr = hdr; if (pkt_stat->phy_status) { phy_status = rx_desc + desc_sz + 
pkt_stat->shift; query_phy_status(rtwdev, phy_status, pkt_stat); @@ -2108,6 +2826,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev) rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN); } +static void rtw8822c_do_lck(struct rtw_dev *rtwdev) +{ + u32 val; + + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010); + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA); + fsleep(1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001); + read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000, + true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8); + rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000); + fsleep(1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000); +} + static void rtw8822c_do_iqk(struct rtw_dev *rtwdev) { struct rtw_iqk_para para = {0}; @@ -2513,9 +3251,9 @@ static void rtw8822c_dpk_pre_setting(struct rtw_dev *rtwdev) rtw_write_rf(rtwdev, path, RF_RXAGC_OFFSET, RFREG_MASK, 0x0); rtw_write32(rtwdev, REG_NCTL0, 0x8 | (path << 1)); if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000); + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f100000); else - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000); + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f0d0000); rtw_write32_mask(rtwdev, REG_DPD_LUT0, BIT_GLOSS_DB, 0x4); rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x3); } @@ -2533,11 +3271,11 @@ static u32 rtw8822c_dpk_rf_setting(struct rtw_dev *rtwdev, u8 path) rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_PWR_TRIM, 0x1); - rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_TX_OFFSET_VAL, 0x0); + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_BB_GAIN, 0x0); rtw_write_rf(rtwdev, path, RF_TX_GAIN, RFREG_MASK, ori_txbb); if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) { - rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_LB_ATT, 0x1); + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_RF_GAIN, 0x1); rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x0); } else { rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x0); @@ -3284,9 +4022,9 @@ static void rtw8822c_dpk_reload_data(struct rtw_dev *rtwdev) rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); if (dpk_info->dpk_band == RTW_BAND_2G) - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000); + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f100000); else - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000); + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f0d0000); rtw_write8(rtwdev, REG_DPD_AGC, dpk_info->dpk_txagc[path]); @@ -3370,8 +4108,11 @@ static void rtw8822c_do_dpk(struct rtw_dev *rtwdev) static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev) { + rtw8822c_rfk_power_save(rtwdev, false); + rtw8822c_do_gapk(rtwdev); rtw8822c_do_iqk(rtwdev); rtw8822c_do_dpk(rtwdev); + rtw8822c_rfk_power_save(rtwdev, true); } static void rtw8822c_dpk_track(struct rtw_dev *rtwdev) @@ -3406,6 +4147,128 @@ static void rtw8822c_dpk_track(struct rtw_dev *rtwdev) } } +#define XCAP_EXTEND(val) ({typeof(val) _v = (val); _v | _v << 7; }) +static void rtw8822c_set_crystal_cap_reg(struct rtw_dev *rtwdev, u8 crystal_cap) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct 
rtw_cfo_track *cfo = &dm_info->cfo_track; + u32 val = 0; + + val = XCAP_EXTEND(crystal_cap); + cfo->crystal_cap = crystal_cap; + rtw_write32_mask(rtwdev, REG_ANAPAR_XTAL_0, BIT_XCAP_0, val); +} + +static void rtw8822c_set_crystal_cap(struct rtw_dev *rtwdev, u8 crystal_cap) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + if (cfo->crystal_cap == crystal_cap) + return; + + rtw8822c_set_crystal_cap_reg(rtwdev, crystal_cap); +} + +static void rtw8822c_cfo_tracking_reset(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + cfo->is_adjust = true; + + if (cfo->crystal_cap > rtwdev->efuse.crystal_cap) + rtw8822c_set_crystal_cap(rtwdev, cfo->crystal_cap - 1); + else if (cfo->crystal_cap < rtwdev->efuse.crystal_cap) + rtw8822c_set_crystal_cap(rtwdev, cfo->crystal_cap + 1); +} + +static void rtw8822c_cfo_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + cfo->crystal_cap = rtwdev->efuse.crystal_cap; + cfo->is_adjust = true; +} + +#define REPORT_TO_KHZ(val) ({typeof(val) _v = (val); (_v << 1) + (_v >> 1); }) +static s32 rtw8822c_cfo_calc_avg(struct rtw_dev *rtwdev, u8 path_num) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + s32 cfo_avg, cfo_path_sum = 0, cfo_rpt_sum; + u8 i; + + for (i = 0; i < path_num; i++) { + cfo_rpt_sum = REPORT_TO_KHZ(cfo->cfo_tail[i]); + + if (cfo->cfo_cnt[i]) + cfo_avg = cfo_rpt_sum / cfo->cfo_cnt[i]; + else + cfo_avg = 0; + + cfo_path_sum += cfo_avg; + } + + for (i = 0; i < path_num; i++) { + cfo->cfo_tail[i] = 0; + cfo->cfo_cnt[i] = 0; + } + + return cfo_path_sum / path_num; +} + +static void rtw8822c_cfo_need_adjust(struct rtw_dev *rtwdev, s32 cfo_avg) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + if (!cfo->is_adjust) { + if (abs(cfo_avg) > CFO_TRK_ENABLE_TH) + cfo->is_adjust = true; + } else { + if (abs(cfo_avg) <= CFO_TRK_STOP_TH) + cfo->is_adjust = false; + } + + if (!rtw_coex_disabled(rtwdev)) { + cfo->is_adjust = false; + rtw8822c_set_crystal_cap(rtwdev, rtwdev->efuse.crystal_cap); + } +} + +static void rtw8822c_cfo_track(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + u8 path_num = rtwdev->hal.rf_path_num; + s8 crystal_cap = cfo->crystal_cap; + s32 cfo_avg = 0; + + if (rtwdev->sta_cnt != 1) { + rtw8822c_cfo_tracking_reset(rtwdev); + return; + } + + if (cfo->packet_count == cfo->packet_count_pre) + return; + + cfo->packet_count_pre = cfo->packet_count; + cfo_avg = rtw8822c_cfo_calc_avg(rtwdev, path_num); + rtw8822c_cfo_need_adjust(rtwdev, cfo_avg); + + if (cfo->is_adjust) { + if (cfo_avg > CFO_TRK_ADJ_TH) + crystal_cap++; + else if (cfo_avg < -CFO_TRK_ADJ_TH) + crystal_cap--; + + crystal_cap = clamp_t(s8, crystal_cap, 0, XCAP_MASK); + rtw8822c_set_crystal_cap(rtwdev, (u8)crystal_cap); + } +} + static const struct rtw_phy_cck_pd_reg rtw8822c_cck_pd_reg[RTW_CHANNEL_WIDTH_40 + 1][RTW_RF_PATH_MAX] = { { @@ -3538,11 +4401,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev) rtw_phy_config_swing_table(rtwdev, &swing_table); + if (rtw_phy_pwrtrack_need_lck(rtwdev)) + rtw8822c_do_lck(rtwdev); + for (i = 0; i < rtwdev->hal.rf_path_num; i++) rtw8822c_pwr_track_path(rtwdev, &swing_table, i); - if (rtw_phy_pwrtrack_need_iqk(rtwdev)) - 
rtw8822c_do_iqk(rtwdev); } static void rtw8822c_pwr_track(struct rtw_dev *rtwdev) @@ -3971,6 +4835,7 @@ static struct rtw_chip_ops rtw8822c_ops = { .query_rx_desc = rtw8822c_query_rx_desc, .set_channel = rtw8822c_set_channel, .mac_init = rtw8822c_mac_init, + .dump_fw_crash = rtw8822c_dump_fw_crash, .read_rf = rtw_phy_read_rf, .write_rf = rtw_phy_write_rf_reg_mix, .set_tx_power_index = rtw8822c_set_tx_power_index, @@ -3984,6 +4849,8 @@ static struct rtw_chip_ops rtw8822c_ops = { .config_bfee = rtw8822c_bf_config_bfee, .set_gid_table = rtw_bf_set_gid_table, .cfg_csi_rate = rtw_bf_cfg_csi_rate, + .cfo_init = rtw8822c_cfo_init, + .cfo_track = rtw8822c_cfo_track, .coex_set_init = rtw8822c_coex_cfg_init, .coex_set_ant_switch = NULL, @@ -4351,6 +5218,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .dpd_ratemask = DIS_DPD_RATEALL, .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl, .iqk_threshold = 8, + .lck_threshold = 8, .bfer_su_max_num = 2, .bfer_mu_max_num = 1, .rx_ldpc = true, @@ -4360,7 +5228,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .wowlan_stub = &rtw_wowlan_stub_8822c, .max_sched_scan_ssids = 4, #endif - .coex_para_ver = 0x201029, + .coex_para_ver = 0x2103181c, .bt_desired_ver = 0x1c, .scbd_support = true, .new_scbd10_def = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h index bb2495b8609e..364afc6d851b 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h @@ -164,175 +164,248 @@ const struct rtw_table name ## _tbl = { \ #define REG_ANAPARLDO_POW_MAC 0x0029 #define BIT_LDOE25_PON BIT(0) +#define XCAP_MASK GENMASK(6, 0) +#define CFO_TRK_ENABLE_TH 20 +#define CFO_TRK_STOP_TH 10 +#define CFO_TRK_ADJ_TH 10 -#define REG_TXDFIR0 0x808 -#define REG_DFIRBW 0x810 -#define REG_ANTMAP0 0x820 -#define REG_ANTMAP 0x824 -#define REG_DYMPRITH 0x86c -#define REG_DYMENTH0 0x870 -#define REG_DYMENTH 0x874 -#define REG_SBD 0x88c +#define REG_TXDFIR0 0x808 +#define REG_DFIRBW 0x810 +#define REG_ANTMAP0 0x820 +#define BIT_ANT_PATH GENMASK(1, 0) +#define REG_ANTMAP 0x824 +#define REG_DYMPRITH 0x86c +#define REG_DYMENTH0 0x870 +#define REG_DYMENTH 0x874 +#define REG_SBD 0x88c #define BITS_SUBTUNE GENMASK(15, 12) -#define REG_DYMTHMIN 0x8a4 -#define REG_TXBWCTL 0x9b0 -#define REG_TXCLK 0x9b4 -#define REG_SCOTRK 0xc30 -#define REG_MRCM 0xc38 -#define REG_AGCSWSH 0xc44 -#define REG_ANTWTPD 0xc54 -#define REG_PT_CHSMO 0xcbc +#define REG_DYMTHMIN 0x8a4 + +#define REG_TXBWCTL 0x9b0 +#define REG_TXCLK 0x9b4 + +#define REG_SCOTRK 0xc30 +#define REG_MRCM 0xc38 +#define REG_AGCSWSH 0xc44 +#define REG_ANTWTPD 0xc54 +#define REG_PT_CHSMO 0xcbc #define BIT_PT_OPT BIT(21) -#define REG_ORITXCODE 0x1800 -#define REG_3WIRE 0x180c + +#define REG_ORITXCODE 0x1800 +#define BIT_PATH_EN BIT(31) +#define REG_3WIRE 0x180c +#define BIT_DIS_SHARERX_TXGAT BIT(27) #define BIT_3WIRE_TX_EN BIT(0) #define BIT_3WIRE_RX_EN BIT(1) +#define BIT_3WIRE_EN GENMASK(1, 0) #define BIT_3WIRE_PI_ON BIT(28) -#define REG_ANAPAR_A 0x1830 +#define REG_ANAPAR_A 0x1830 #define BIT_ANAPAR_UPDATE BIT(29) -#define REG_RXAGCCTL0 0x18ac +#define REG_RFTXEN_GCK_A 0x1864 +#define BIT_RFTXEN_GCK_FORCE_ON BIT(31) +#define REG_DIS_SHARE_RX_A 0x186c +#define BIT_TX_SCALE_0DB BIT(7) +#define REG_RXAGCCTL0 0x18ac #define BITS_RXAGC_CCK GENMASK(15, 12) #define BITS_RXAGC_OFDM GENMASK(8, 4) -#define REG_DCKA_I_0 0x18bc -#define REG_DCKA_I_1 0x18c0 -#define REG_DCKA_Q_0 0x18d8 -#define REG_DCKA_Q_1 0x18dc -#define REG_CCKSB 0x1a00 -#define REG_RXCCKSEL 
0x1a04 -#define REG_BGCTRL 0x1a14 +#define REG_DCKA_I_0 0x18bc +#define REG_DCKA_I_1 0x18c0 +#define REG_DCKA_Q_0 0x18d8 +#define REG_DCKA_Q_1 0x18dc + +#define REG_CCKSB 0x1a00 +#define BIT_BBMODE GENMASK(2, 1) +#define REG_RXCCKSEL 0x1a04 +#define REG_BGCTRL 0x1a14 #define BITS_RX_IQ_WEIGHT (BIT(8) | BIT(9)) -#define REG_TXF0 0x1a20 -#define REG_TXF1 0x1a24 -#define REG_TXF2 0x1a28 -#define REG_CCANRX 0x1a2c +#define REG_TXF0 0x1a20 +#define REG_TXF1 0x1a24 +#define REG_TXF2 0x1a28 +#define REG_CCANRX 0x1a2c #define BIT_CCK_FA_RST (BIT(14) | BIT(15)) #define BIT_OFDM_FA_RST (BIT(12) | BIT(13)) -#define REG_CCK_FACNT 0x1a5c -#define REG_CCKTXONLY 0x1a80 +#define REG_CCK_FACNT 0x1a5c +#define REG_CCKTXONLY 0x1a80 #define BIT_BB_CCK_CHECK_EN BIT(18) -#define REG_TXF3 0x1a98 -#define REG_TXF4 0x1a9c -#define REG_TXF5 0x1aa0 -#define REG_TXF6 0x1aac -#define REG_TXF7 0x1ab0 -#define REG_CCK_SOURCE 0x1abc +#define REG_TXF3 0x1a98 +#define REG_TXF4 0x1a9c +#define REG_TXF5 0x1aa0 +#define REG_TXF6 0x1aac +#define REG_TXF7 0x1ab0 +#define REG_CCK_SOURCE 0x1abc #define BIT_NBI_EN BIT(30) -#define REG_IQKSTAT 0x1b10 -#define REG_TXANT 0x1c28 -#define REG_ENCCK 0x1c3c -#define BIT_CCK_BLK_EN BIT(1) -#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1)) -#define REG_CCAMSK 0x1c80 -#define REG_RSTB 0x1c90 -#define BIT_RSTB_3WIRE BIT(8) -#define REG_RX_BREAK 0x1d2c -#define BIT_COM_RX_GCK_EN BIT(31) -#define REG_RXFNCTL 0x1d30 -#define REG_RXIGI 0x1d70 -#define REG_ENFN 0x1e24 -#define REG_TXANTSEG 0x1e28 -#define REG_TXLGMAP 0x1e2c -#define REG_CCKPATH 0x1e5c -#define REG_CNT_CTRL 0x1eb4 -#define BIT_ALL_CNT_RST BIT(25) -#define REG_OFDM_FACNT 0x2d00 -#define REG_OFDM_FACNT1 0x2d04 -#define REG_OFDM_FACNT2 0x2d08 -#define REG_OFDM_FACNT3 0x2d0c -#define REG_OFDM_FACNT4 0x2d10 -#define REG_OFDM_FACNT5 0x2d20 -#define REG_RPT_CIP 0x2d9c -#define REG_OFDM_TXCNT 0x2de0 -#define REG_ORITXCODE2 0x4100 -#define REG_3WIRE2 0x410c -#define REG_ANAPAR_B 0x4130 -#define REG_RXAGCCTL 0x41ac -#define REG_DCKB_I_0 0x41bc -#define REG_DCKB_I_1 0x41c0 -#define REG_DCKB_Q_0 0x41d8 -#define REG_DCKB_Q_1 0x41dc - -#define RF_MODE_TRXAGC 0x00 -#define RF_RXAGC_OFFSET 0x19 -#define RF_BW_TRXBB 0x1a -#define RF_TX_GAIN_OFFSET 0x55 -#define RF_TX_GAIN 0x56 -#define RF_TXA_LB_SW 0x63 -#define RF_RXG_GAIN 0x87 -#define RF_RXA_MIX_GAIN 0x8a -#define RF_EXT_TIA_BW 0x8f -#define RF_DEBUG 0xde #define REG_NCTL0 0x1b00 +#define BIT_SEL_PATH GENMASK(2, 1) +#define BIT_SUBPAGE GENMASK(3, 0) #define REG_DPD_CTL0_S0 0x1b04 +#define BIT_GS_PWSF GENMASK(27, 0) #define REG_DPD_CTL1_S0 0x1b08 +#define BIT_DPD_EN BIT(31) +#define BIT_PS_EN BIT(7) +#define REG_IQKSTAT 0x1b10 #define REG_IQK_CTL1 0x1b20 +#define BIT_TX_CFIR GENMASK(31, 30) +#define BIT_CFIR_EN GENMASK(26, 24) +#define BIT_BYPASS_DPD BIT(25) + +#define REG_TX_TONE_IDX 0x1b2c #define REG_DPD_LUT0 0x1b44 +#define BIT_GLOSS_DB GENMASK(14, 12) #define REG_DPD_CTL0_S1 0x1b5c -#define REG_DPD_LUT3 0x1b60 #define REG_DPD_CTL1_S1 0x1b60 #define REG_DPD_AGC 0x1b67 +#define REG_TABLE_SEL 0x1b98 +#define BIT_I_GAIN GENMASK(19, 16) +#define BIT_GAIN_RST BIT(15) +#define BIT_Q_GAIN_SEL GENMASK(14, 12) +#define BIT_Q_GAIN GENMASK(11, 0) +#define REG_TX_GAIN_SET 0x1b9c +#define BIT_GAPK_RPT_IDX GENMASK(11, 8) #define REG_DPD_CTL0 0x1bb4 +#define REG_SINGLE_TONE_SW 0x1bb8 +#define BIT_IRQ_TEST_MODE BIT(20) #define REG_R_CONFIG 0x1bcc +#define BIT_INNER_LB BIT(21) +#define BIT_IQ_SWITCH GENMASK(5, 0) +#define BIT_2G_SWING 0x2d +#define BIT_5G_SWING 0x36 #define REG_RXSRAM_CTL 0x1bd4 +#define 
BIT_RPT_EN BIT(21) +#define BIT_RPT_SEL GENMASK(20, 16) +#define BIT_DPD_CLK GENMASK(7, 4) #define REG_DPD_CTL11 0x1be4 #define REG_DPD_CTL12 0x1be8 #define REG_DPD_CTL15 0x1bf4 #define REG_DPD_CTL16 0x1bf8 #define REG_STAT_RPT 0x1bfc +#define BIT_RPT_DGAIN GENMASK(27, 16) +#define BIT_GAPK_RPT0 GENMASK(3, 0) +#define BIT_GAPK_RPT1 GENMASK(7, 4) +#define BIT_GAPK_RPT2 GENMASK(11, 8) +#define BIT_GAPK_RPT3 GENMASK(15, 12) +#define BIT_GAPK_RPT4 GENMASK(19, 16) +#define BIT_GAPK_RPT5 GENMASK(23, 20) +#define BIT_GAPK_RPT6 GENMASK(27, 24) +#define BIT_GAPK_RPT7 GENMASK(31, 28) + +#define REG_TXANT 0x1c28 +#define REG_IQK_CTRL 0x1c38 +#define REG_ENCCK 0x1c3c +#define BIT_CCK_BLK_EN BIT(1) +#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1)) +#define REG_CCAMSK 0x1c80 +#define REG_RSTB 0x1c90 +#define BIT_RSTB_3WIRE BIT(8) +#define REG_CH_DELAY_EXTR2 0x1cd0 +#define BIT_TST_IQK2SET_SRC BIT(31) +#define BIT_EN_IOQ_IQK_DPK BIT(30) +#define BIT_IQK_DPK_RESET_SRC BIT(29) +#define BIT_IQK_DPK_CLOCK_SRC BIT(28) + +#define REG_RX_BREAK 0x1d2c +#define BIT_COM_RX_GCK_EN BIT(31) +#define REG_RXFNCTL 0x1d30 +#define REG_CCA_OFF 0x1d58 +#define BIT_CCA_ON_BY_PW GENMASK(11, 3) +#define REG_RXIGI 0x1d70 + +#define REG_ENFN 0x1e24 +#define BIT_IQK_DPK_EN BIT(17) +#define REG_TXANTSEG 0x1e28 +#define BIT_ANTSEG GENMASK(3, 0) +#define REG_TXLGMAP 0x1e2c +#define REG_CCKPATH 0x1e5c +#define REG_TX_FIFO 0x1e70 +#define BIT_STOP_TX GENMASK(3, 0) +#define REG_CNT_CTRL 0x1eb4 +#define BIT_ALL_CNT_RST BIT(25) + +#define REG_OFDM_FACNT 0x2d00 +#define REG_OFDM_FACNT1 0x2d04 +#define REG_OFDM_FACNT2 0x2d08 +#define REG_OFDM_FACNT3 0x2d0c +#define REG_OFDM_FACNT4 0x2d10 +#define REG_OFDM_FACNT5 0x2d20 +#define REG_RPT_CIP 0x2d9c +#define BIT_RPT_CIP_STATUS GENMASK(7, 0) +#define REG_OFDM_TXCNT 0x2de0 +#define REG_ORITXCODE2 0x4100 +#define REG_3WIRE2 0x410c +#define REG_ANAPAR_B 0x4130 +#define REG_RFTXEN_GCK_B 0x4164 +#define REG_DIS_SHARE_RX_B 0x416c #define BIT_EXT_TIA_BW BIT(1) -#define BIT_DE_TRXBW BIT(2) -#define BIT_DE_TX_GAIN BIT(16) -#define BIT_RXG_GAIN BIT(18) -#define BIT_DE_PWR_TRIM BIT(19) -#define BIT_INNER_LB BIT(21) -#define BIT_BYPASS_DPD BIT(25) -#define BIT_DPD_EN BIT(31) -#define BIT_SUBPAGE GENMASK(3, 0) +#define REG_RXAGCCTL 0x41ac +#define REG_DCKB_I_0 0x41bc +#define REG_DCKB_I_1 0x41c0 +#define REG_DCKB_Q_0 0x41d8 +#define REG_DCKB_Q_1 0x41dc + +#define RF_MODE_TRXAGC 0x00 +#define BIT_RF_MODE GENMASK(19, 16) +#define BIT_RXAGC GENMASK(9, 5) #define BIT_TXAGC GENMASK(4, 0) +#define RF_RXAGC_OFFSET 0x19 +#define RF_BW_TRXBB 0x1a +#define BIT_TX_CCK_IND BIT(16) +#define BIT_BW_TXBB GENMASK(14, 12) +#define BIT_BW_RXBB GENMASK(11, 10) +#define BIT_DBG_CCK_CCA BIT(1) +#define RF_TX_GAIN_OFFSET 0x55 +#define BIT_BB_GAIN GENMASK(18, 14) +#define BIT_RF_GAIN GENMASK(4, 2) +#define RF_TX_GAIN 0x56 #define BIT_GAIN_TXBB GENMASK(4, 0) +#define RF_IDAC 0x58 +#define BIT_TX_MODE GENMASK(19, 8) +#define RF_TX_RESULT 0x5f +#define BIT_GAIN_TX_PAD_H GENMASK(11, 8) +#define BIT_GAIN_TX_PAD_L GENMASK(7, 4) +#define RF_PA 0x60 +#define RF_PABIAS_2G_MASK GENMASK(15, 12) +#define RF_PABIAS_5G_MASK GENMASK(19, 16) +#define RF_TXA_LB_SW 0x63 +#define BIT_TXA_LB_ATT GENMASK(15, 14) +#define BIT_LB_SW GENMASK(13, 12) #define BIT_LB_ATT GENMASK(4, 2) +#define RF_RXG_GAIN 0x87 +#define BIT_RXG_GAIN BIT(18) +#define RF_RXA_MIX_GAIN 0x8a #define BIT_RXA_MIX_GAIN GENMASK(4, 3) -#define BIT_IQ_SWITCH GENMASK(5, 0) -#define BIT_DPD_CLK GENMASK(7, 4) -#define BIT_RXAGC GENMASK(9, 5) -#define BIT_BW_RXBB GENMASK(11, 10) -#define 
BIT_LB_SW GENMASK(13, 12) -#define BIT_BW_TXBB GENMASK(14, 12) -#define BIT_GLOSS_DB GENMASK(14, 12) -#define BIT_TXA_LB_ATT GENMASK(15, 14) -#define BIT_TX_OFFSET_VAL GENMASK(18, 14) -#define BIT_RPT_SEL GENMASK(20, 16) -#define BIT_GS_PWSF GENMASK(27, 0) -#define BIT_RPT_DGAIN GENMASK(27, 16) -#define BIT_TX_CFIR GENMASK(31, 30) - -#define PPG_THERMAL_A 0x1ef -#define PPG_THERMAL_B 0x1b0 -#define RF_THEMAL_MASK GENMASK(19, 16) -#define PPG_2GL_TXAB 0x1d4 -#define PPG_2GM_TXAB 0x1ee -#define PPG_2GH_TXAB 0x1d2 -#define PPG_2G_A_MASK GENMASK(3, 0) -#define PPG_2G_B_MASK GENMASK(7, 4) -#define PPG_5GL1_TXA 0x1ec -#define PPG_5GL2_TXA 0x1e8 -#define PPG_5GM1_TXA 0x1e4 -#define PPG_5GM2_TXA 0x1e0 -#define PPG_5GH1_TXA 0x1dc -#define PPG_5GL1_TXB 0x1eb -#define PPG_5GL2_TXB 0x1e7 -#define PPG_5GM1_TXB 0x1e3 -#define PPG_5GM2_TXB 0x1df -#define PPG_5GH1_TXB 0x1db -#define PPG_5G_MASK GENMASK(4, 0) -#define PPG_PABIAS_2GA 0x1d6 -#define PPG_PABIAS_2GB 0x1d5 -#define PPG_PABIAS_5GA 0x1d8 -#define PPG_PABIAS_5GB 0x1d7 -#define PPG_PABIAS_MASK GENMASK(3, 0) -#define RF_PABIAS_2G_MASK GENMASK(15, 12) -#define RF_PABIAS_5G_MASK GENMASK(19, 16) +#define RF_EXT_TIA_BW 0x8f +#define BIT_PW_EXT_TIA BIT(1) +#define RF_DIS_BYPASS_TXBB 0x9e +#define BIT_TXBB BIT(10) +#define BIT_TIA_BYPASS BIT(5) +#define RF_DEBUG 0xde +#define BIT_DE_PWR_TRIM BIT(19) +#define BIT_DE_TX_GAIN BIT(16) +#define BIT_DE_TRXBW BIT(2) +#define PPG_THERMAL_B 0x1b0 +#define RF_THEMAL_MASK GENMASK(19, 16) +#define PPG_2GH_TXAB 0x1d2 +#define PPG_2G_A_MASK GENMASK(3, 0) +#define PPG_2G_B_MASK GENMASK(7, 4) +#define PPG_2GL_TXAB 0x1d4 +#define PPG_PABIAS_2GB 0x1d5 +#define PPG_PABIAS_2GA 0x1d6 +#define PPG_PABIAS_MASK GENMASK(3, 0) +#define PPG_PABIAS_5GB 0x1d7 +#define PPG_PABIAS_5GA 0x1d8 +#define PPG_5G_MASK GENMASK(4, 0) +#define PPG_5GH1_TXB 0x1db +#define PPG_5GH1_TXA 0x1dc +#define PPG_5GM2_TXB 0x1df +#define PPG_5GM2_TXA 0x1e0 +#define PPG_5GM1_TXB 0x1e3 +#define PPG_5GM1_TXA 0x1e4 +#define PPG_5GL2_TXB 0x1e7 +#define PPG_5GL2_TXA 0x1e8 +#define PPG_5GL1_TXB 0x1eb +#define PPG_5GL1_TXA 0x1ec +#define PPG_2GM_TXAB 0x1ee +#define PPG_THERMAL_A 0x1ef #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c index ad5715c65de3..822f3da91f1b 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c @@ -40863,7 +40863,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, { 0, 1, 0, 1, 149, 76, }, - { 2, 1, 0, 1, 149, -128, }, + { 2, 1, 0, 1, 149, 54, }, { 1, 1, 0, 1, 149, 127, }, { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, @@ -40871,9 +40871,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 149, 76, }, { 7, 1, 0, 1, 149, 54, }, { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, -128, }, + { 9, 1, 0, 1, 149, 54, }, { 0, 1, 0, 1, 153, 76, }, - { 2, 1, 0, 1, 153, -128, }, + { 2, 1, 0, 1, 153, 54, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, @@ -40881,9 +40881,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 153, 76, }, { 7, 1, 0, 1, 153, 54, }, { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, -128, }, + { 9, 1, 0, 1, 153, 54, }, { 0, 1, 0, 1, 157, 76, }, - { 2, 1, 0, 1, 157, -128, }, + { 2, 1, 0, 1, 157, 54, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, @@ -40891,9 
+40891,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 157, 76, }, { 7, 1, 0, 1, 157, 54, }, { 8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, -128, }, + { 9, 1, 0, 1, 157, 54, }, { 0, 1, 0, 1, 161, 76, }, - { 2, 1, 0, 1, 161, -128, }, + { 2, 1, 0, 1, 161, 54, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, @@ -40901,9 +40901,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 161, 76, }, { 7, 1, 0, 1, 161, 54, }, { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, -128, }, + { 9, 1, 0, 1, 161, 54, }, { 0, 1, 0, 1, 165, 76, }, - { 2, 1, 0, 1, 165, -128, }, + { 2, 1, 0, 1, 165, 54, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, @@ -40911,7 +40911,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 165, 76, }, { 7, 1, 0, 1, 165, 54, }, { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, -128, }, + { 9, 1, 0, 1, 165, 54, }, { 0, 1, 0, 2, 36, 72, }, { 2, 1, 0, 2, 36, 62, }, { 1, 1, 0, 2, 36, 62, }, @@ -41113,7 +41113,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, { 0, 1, 0, 2, 149, 76, }, - { 2, 1, 0, 2, 149, -128, }, + { 2, 1, 0, 2, 149, 54, }, { 1, 1, 0, 2, 149, 127, }, { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, @@ -41121,9 +41121,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 149, 76, }, { 7, 1, 0, 2, 149, 54, }, { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, -128, }, + { 9, 1, 0, 2, 149, 54, }, { 0, 1, 0, 2, 153, 76, }, - { 2, 1, 0, 2, 153, -128, }, + { 2, 1, 0, 2, 153, 54, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, @@ -41131,9 +41131,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 153, 76, }, { 7, 1, 0, 2, 153, 54, }, { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, -128, }, + { 9, 1, 0, 2, 153, 54, }, { 0, 1, 0, 2, 157, 76, }, - { 2, 1, 0, 2, 157, -128, }, + { 2, 1, 0, 2, 157, 54, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, @@ -41141,9 +41141,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 157, 76, }, { 7, 1, 0, 2, 157, 54, }, { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, -128, }, + { 9, 1, 0, 2, 157, 54, }, { 0, 1, 0, 2, 161, 76, }, - { 2, 1, 0, 2, 161, -128, }, + { 2, 1, 0, 2, 161, 54, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, @@ -41151,9 +41151,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 161, 76, }, { 7, 1, 0, 2, 161, 54, }, { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, -128, }, + { 9, 1, 0, 2, 161, 54, }, { 0, 1, 0, 2, 165, 76, }, - { 2, 1, 0, 2, 165, -128, }, + { 2, 1, 0, 2, 165, 54, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, @@ -41161,7 +41161,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 165, 76, }, { 7, 1, 0, 2, 165, 54, }, { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, -128, }, + { 9, 1, 0, 2, 165, 54, }, { 0, 1, 0, 3, 36, 68, }, { 2, 1, 0, 3, 36, 38, }, { 1, 1, 0, 3, 36, 50, }, @@ -41363,7 +41363,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, { 0, 1, 0, 3, 149, 76, }, - { 2, 1, 0, 3, 149, -128, }, + { 2, 1, 0, 3, 149, 30, }, { 1, 1, 0, 3, 
149, 127, }, { 3, 1, 0, 3, 149, 76, }, { 4, 1, 0, 3, 149, 60, }, @@ -41371,9 +41371,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 149, 76, }, { 7, 1, 0, 3, 149, 30, }, { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, -128, }, + { 9, 1, 0, 3, 149, 30, }, { 0, 1, 0, 3, 153, 76, }, - { 2, 1, 0, 3, 153, -128, }, + { 2, 1, 0, 3, 153, 30, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, { 4, 1, 0, 3, 153, 60, }, @@ -41381,9 +41381,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 153, 76, }, { 7, 1, 0, 3, 153, 30, }, { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, -128, }, + { 9, 1, 0, 3, 153, 30, }, { 0, 1, 0, 3, 157, 76, }, - { 2, 1, 0, 3, 157, -128, }, + { 2, 1, 0, 3, 157, 30, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, { 4, 1, 0, 3, 157, 60, }, @@ -41391,9 +41391,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 157, 76, }, { 7, 1, 0, 3, 157, 30, }, { 8, 1, 0, 3, 157, 76, }, - { 9, 1, 0, 3, 157, -128, }, + { 9, 1, 0, 3, 157, 30, }, { 0, 1, 0, 3, 161, 76, }, - { 2, 1, 0, 3, 161, -128, }, + { 2, 1, 0, 3, 161, 30, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, { 4, 1, 0, 3, 161, 60, }, @@ -41401,9 +41401,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 161, 76, }, { 7, 1, 0, 3, 161, 30, }, { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, -128, }, + { 9, 1, 0, 3, 161, 30, }, { 0, 1, 0, 3, 165, 76, }, - { 2, 1, 0, 3, 165, -128, }, + { 2, 1, 0, 3, 165, 30, }, { 1, 1, 0, 3, 165, 127, }, { 3, 1, 0, 3, 165, 76, }, { 4, 1, 0, 3, 165, 60, }, @@ -41411,7 +41411,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 165, 76, }, { 7, 1, 0, 3, 165, 30, }, { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, -128, }, + { 9, 1, 0, 3, 165, 30, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, { 1, 1, 1, 2, 38, 62, }, @@ -41513,7 +41513,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, { 0, 1, 1, 2, 151, 72, }, - { 2, 1, 1, 2, 151, -128, }, + { 2, 1, 1, 2, 151, 54, }, { 1, 1, 1, 2, 151, 127, }, { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, @@ -41521,9 +41521,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 151, 72, }, { 7, 1, 1, 2, 151, 54, }, { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, -128, }, + { 9, 1, 1, 2, 151, 54, }, { 0, 1, 1, 2, 159, 72, }, - { 2, 1, 1, 2, 159, -128, }, + { 2, 1, 1, 2, 159, 54, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, @@ -41531,7 +41531,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 159, 72, }, { 7, 1, 1, 2, 159, 54, }, { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, -128, }, + { 9, 1, 1, 2, 159, 54, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, @@ -41633,7 +41633,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, { 0, 1, 1, 3, 151, 72, }, - { 2, 1, 1, 3, 151, -128, }, + { 2, 1, 1, 3, 151, 30, }, { 1, 1, 1, 3, 151, 127, }, { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, @@ -41641,9 +41641,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 151, 72, }, { 7, 1, 1, 3, 151, 30, }, { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, -128, }, + { 9, 1, 1, 3, 151, 30, }, { 0, 1, 1, 3, 159, 72, 
}, - { 2, 1, 1, 3, 159, -128, }, + { 2, 1, 1, 3, 159, 30, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, @@ -41651,7 +41651,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 159, 72, }, { 7, 1, 1, 3, 159, 30, }, { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, -128, }, + { 9, 1, 1, 3, 159, 30, }, { 0, 1, 2, 4, 42, 64, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, @@ -41703,7 +41703,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, { 0, 1, 2, 4, 155, 72, }, - { 2, 1, 2, 4, 155, -128, }, + { 2, 1, 2, 4, 155, 54, }, { 1, 1, 2, 4, 155, 127, }, { 3, 1, 2, 4, 155, 72, }, { 4, 1, 2, 4, 155, 68, }, @@ -41711,7 +41711,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 4, 155, 72, }, { 7, 1, 2, 4, 155, 54, }, { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, -128, }, + { 9, 1, 2, 4, 155, 54, }, { 0, 1, 2, 5, 42, 54, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, @@ -41763,7 +41763,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, { 0, 1, 2, 5, 155, 62, }, - { 2, 1, 2, 5, 155, -128, }, + { 2, 1, 2, 5, 155, 30, }, { 1, 1, 2, 5, 155, 127, }, { 3, 1, 2, 5, 155, 62, }, { 4, 1, 2, 5, 155, 58, }, @@ -41771,145 +41771,145 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 5, 155, 62, }, { 7, 1, 2, 5, 155, 30, }, { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, -128, }, + { 9, 1, 2, 5, 155, 30, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0); static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 0, 0, 0, 0, 1, 72, }, - { 2, 0, 0, 0, 1, 60, }, - { 1, 0, 0, 0, 1, 68, }, + { 2, 0, 0, 0, 1, 56, }, + { 1, 0, 0, 0, 1, 72, }, { 3, 0, 0, 0, 1, 72, }, { 4, 0, 0, 0, 1, 76, }, - { 5, 0, 0, 0, 1, 60, }, + { 5, 0, 0, 0, 1, 56, }, { 6, 0, 0, 0, 1, 72, }, { 7, 0, 0, 0, 1, 60, }, { 8, 0, 0, 0, 1, 72, }, { 9, 0, 0, 0, 1, 60, }, { 0, 0, 0, 0, 2, 72, }, - { 2, 0, 0, 0, 2, 60, }, - { 1, 0, 0, 0, 2, 68, }, + { 2, 0, 0, 0, 2, 56, }, + { 1, 0, 0, 0, 2, 72, }, { 3, 0, 0, 0, 2, 72, }, { 4, 0, 0, 0, 2, 76, }, - { 5, 0, 0, 0, 2, 60, }, + { 5, 0, 0, 0, 2, 56, }, { 6, 0, 0, 0, 2, 72, }, { 7, 0, 0, 0, 2, 60, }, { 8, 0, 0, 0, 2, 72, }, { 9, 0, 0, 0, 2, 60, }, { 0, 0, 0, 0, 3, 76, }, - { 2, 0, 0, 0, 3, 60, }, - { 1, 0, 0, 0, 3, 68, }, + { 2, 0, 0, 0, 3, 56, }, + { 1, 0, 0, 0, 3, 72, }, { 3, 0, 0, 0, 3, 76, }, { 4, 0, 0, 0, 3, 76, }, - { 5, 0, 0, 0, 3, 60, }, + { 5, 0, 0, 0, 3, 56, }, { 6, 0, 0, 0, 3, 76, }, { 7, 0, 0, 0, 3, 60, }, { 8, 0, 0, 0, 3, 76, }, { 9, 0, 0, 0, 3, 60, }, { 0, 0, 0, 0, 4, 76, }, - { 2, 0, 0, 0, 4, 60, }, - { 1, 0, 0, 0, 4, 68, }, + { 2, 0, 0, 0, 4, 56, }, + { 1, 0, 0, 0, 4, 72, }, { 3, 0, 0, 0, 4, 76, }, { 4, 0, 0, 0, 4, 76, }, - { 5, 0, 0, 0, 4, 60, }, + { 5, 0, 0, 0, 4, 56, }, { 6, 0, 0, 0, 4, 76, }, { 7, 0, 0, 0, 4, 60, }, { 8, 0, 0, 0, 4, 76, }, { 9, 0, 0, 0, 4, 60, }, { 0, 0, 0, 0, 5, 76, }, - { 2, 0, 0, 0, 5, 60, }, - { 1, 0, 0, 0, 5, 68, }, + { 2, 0, 0, 0, 5, 56, }, + { 1, 0, 0, 0, 5, 72, }, { 3, 0, 0, 0, 5, 76, }, { 4, 0, 0, 0, 5, 76, }, - { 5, 0, 0, 0, 5, 60, }, + { 5, 0, 0, 0, 5, 56, }, { 6, 0, 0, 0, 5, 76, }, { 7, 0, 0, 0, 5, 60, }, { 8, 0, 0, 0, 5, 76, }, { 9, 0, 0, 0, 5, 60, }, { 0, 0, 0, 0, 6, 76, }, - { 2, 0, 0, 0, 6, 60, }, - { 1, 0, 0, 0, 6, 68, }, + { 2, 0, 0, 0, 6, 56, }, + { 1, 0, 0, 0, 6, 72, }, { 3, 0, 0, 0, 6, 76, }, { 4, 0, 0, 0, 6, 76, }, - { 
5, 0, 0, 0, 6, 60, }, + { 5, 0, 0, 0, 6, 56, }, { 6, 0, 0, 0, 6, 76, }, { 7, 0, 0, 0, 6, 60, }, { 8, 0, 0, 0, 6, 76, }, { 9, 0, 0, 0, 6, 60, }, { 0, 0, 0, 0, 7, 76, }, - { 2, 0, 0, 0, 7, 60, }, - { 1, 0, 0, 0, 7, 68, }, + { 2, 0, 0, 0, 7, 56, }, + { 1, 0, 0, 0, 7, 72, }, { 3, 0, 0, 0, 7, 76, }, { 4, 0, 0, 0, 7, 76, }, - { 5, 0, 0, 0, 7, 60, }, + { 5, 0, 0, 0, 7, 56, }, { 6, 0, 0, 0, 7, 76, }, { 7, 0, 0, 0, 7, 60, }, { 8, 0, 0, 0, 7, 76, }, { 9, 0, 0, 0, 7, 60, }, { 0, 0, 0, 0, 8, 76, }, - { 2, 0, 0, 0, 8, 60, }, - { 1, 0, 0, 0, 8, 68, }, + { 2, 0, 0, 0, 8, 56, }, + { 1, 0, 0, 0, 8, 72, }, { 3, 0, 0, 0, 8, 76, }, { 4, 0, 0, 0, 8, 76, }, - { 5, 0, 0, 0, 8, 60, }, + { 5, 0, 0, 0, 8, 56, }, { 6, 0, 0, 0, 8, 76, }, { 7, 0, 0, 0, 8, 60, }, { 8, 0, 0, 0, 8, 76, }, { 9, 0, 0, 0, 8, 60, }, { 0, 0, 0, 0, 9, 76, }, - { 2, 0, 0, 0, 9, 60, }, - { 1, 0, 0, 0, 9, 68, }, + { 2, 0, 0, 0, 9, 56, }, + { 1, 0, 0, 0, 9, 72, }, { 3, 0, 0, 0, 9, 76, }, { 4, 0, 0, 0, 9, 76, }, - { 5, 0, 0, 0, 9, 60, }, + { 5, 0, 0, 0, 9, 56, }, { 6, 0, 0, 0, 9, 76, }, { 7, 0, 0, 0, 9, 60, }, { 8, 0, 0, 0, 9, 76, }, { 9, 0, 0, 0, 9, 60, }, { 0, 0, 0, 0, 10, 72, }, - { 2, 0, 0, 0, 10, 60, }, - { 1, 0, 0, 0, 10, 68, }, + { 2, 0, 0, 0, 10, 56, }, + { 1, 0, 0, 0, 10, 72, }, { 3, 0, 0, 0, 10, 72, }, { 4, 0, 0, 0, 10, 76, }, - { 5, 0, 0, 0, 10, 60, }, + { 5, 0, 0, 0, 10, 56, }, { 6, 0, 0, 0, 10, 72, }, { 7, 0, 0, 0, 10, 60, }, { 8, 0, 0, 0, 10, 72, }, { 9, 0, 0, 0, 10, 60, }, { 0, 0, 0, 0, 11, 72, }, - { 2, 0, 0, 0, 11, 60, }, - { 1, 0, 0, 0, 11, 68, }, + { 2, 0, 0, 0, 11, 56, }, + { 1, 0, 0, 0, 11, 72, }, { 3, 0, 0, 0, 11, 72, }, { 4, 0, 0, 0, 11, 76, }, - { 5, 0, 0, 0, 11, 60, }, + { 5, 0, 0, 0, 11, 56, }, { 6, 0, 0, 0, 11, 72, }, { 7, 0, 0, 0, 11, 60, }, { 8, 0, 0, 0, 11, 72, }, { 9, 0, 0, 0, 11, 60, }, { 0, 0, 0, 0, 12, 44, }, - { 2, 0, 0, 0, 12, 60, }, - { 1, 0, 0, 0, 12, 68, }, + { 2, 0, 0, 0, 12, 56, }, + { 1, 0, 0, 0, 12, 72, }, { 3, 0, 0, 0, 12, 52, }, { 4, 0, 0, 0, 12, 76, }, - { 5, 0, 0, 0, 12, 60, }, + { 5, 0, 0, 0, 12, 56, }, { 6, 0, 0, 0, 12, 52, }, { 7, 0, 0, 0, 12, 60, }, { 8, 0, 0, 0, 12, 52, }, { 9, 0, 0, 0, 12, 60, }, { 0, 0, 0, 0, 13, 40, }, - { 2, 0, 0, 0, 13, 60, }, - { 1, 0, 0, 0, 13, 68, }, + { 2, 0, 0, 0, 13, 56, }, + { 1, 0, 0, 0, 13, 72, }, { 3, 0, 0, 0, 13, 48, }, { 4, 0, 0, 0, 13, 76, }, - { 5, 0, 0, 0, 13, 60, }, + { 5, 0, 0, 0, 13, 56, }, { 6, 0, 0, 0, 13, 48, }, { 7, 0, 0, 0, 13, 60, }, { 8, 0, 0, 0, 13, 48, }, { 9, 0, 0, 0, 13, 60, }, { 0, 0, 0, 0, 14, 127, }, { 2, 0, 0, 0, 14, 127, }, - { 1, 0, 0, 0, 14, 68, }, + { 1, 0, 0, 0, 14, 72, }, { 3, 0, 0, 0, 14, 127, }, { 4, 0, 0, 0, 14, 127, }, { 5, 0, 0, 0, 14, 127, }, @@ -42041,7 +42041,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 1, 13, 60, }, { 1, 0, 0, 1, 13, 76, }, { 3, 0, 0, 1, 13, 28, }, - { 4, 0, 0, 1, 13, 70, }, + { 4, 0, 0, 1, 13, 74, }, { 5, 0, 0, 1, 13, 60, }, { 6, 0, 0, 1, 13, 28, }, { 7, 0, 0, 1, 13, 60, }, @@ -42181,7 +42181,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 2, 13, 60, }, { 1, 0, 0, 2, 13, 76, }, { 3, 0, 0, 2, 13, 28, }, - { 4, 0, 0, 2, 13, 72, }, + { 4, 0, 0, 2, 13, 74, }, { 5, 0, 0, 2, 13, 60, }, { 6, 0, 0, 2, 13, 28, }, { 7, 0, 0, 2, 13, 60, }, @@ -42201,7 +42201,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 1, 36, }, { 1, 0, 0, 3, 1, 66, }, { 3, 0, 0, 3, 1, 52, }, - { 4, 0, 0, 3, 1, 68, }, + { 4, 0, 0, 3, 1, 72, }, { 5, 0, 0, 3, 1, 36, }, { 6, 0, 0, 3, 1, 52, }, { 7, 0, 0, 3, 1, 36, }, @@ 
-42211,7 +42211,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 2, 36, }, { 1, 0, 0, 3, 2, 66, }, { 3, 0, 0, 3, 2, 60, }, - { 4, 0, 0, 3, 2, 70, }, + { 4, 0, 0, 3, 2, 72, }, { 5, 0, 0, 3, 2, 36, }, { 6, 0, 0, 3, 2, 60, }, { 7, 0, 0, 3, 2, 36, }, @@ -42221,7 +42221,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 3, 36, }, { 1, 0, 0, 3, 3, 66, }, { 3, 0, 0, 3, 3, 64, }, - { 4, 0, 0, 3, 3, 70, }, + { 4, 0, 0, 3, 3, 72, }, { 5, 0, 0, 3, 3, 36, }, { 6, 0, 0, 3, 3, 64, }, { 7, 0, 0, 3, 3, 36, }, @@ -42231,7 +42231,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 4, 36, }, { 1, 0, 0, 3, 4, 66, }, { 3, 0, 0, 3, 4, 68, }, - { 4, 0, 0, 3, 4, 70, }, + { 4, 0, 0, 3, 4, 72, }, { 5, 0, 0, 3, 4, 36, }, { 6, 0, 0, 3, 4, 68, }, { 7, 0, 0, 3, 4, 36, }, @@ -42241,7 +42241,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 5, 36, }, { 1, 0, 0, 3, 5, 66, }, { 3, 0, 0, 3, 5, 76, }, - { 4, 0, 0, 3, 5, 70, }, + { 4, 0, 0, 3, 5, 72, }, { 5, 0, 0, 3, 5, 36, }, { 6, 0, 0, 3, 5, 76, }, { 7, 0, 0, 3, 5, 36, }, @@ -42251,7 +42251,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 6, 36, }, { 1, 0, 0, 3, 6, 66, }, { 3, 0, 0, 3, 6, 76, }, - { 4, 0, 0, 3, 6, 70, }, + { 4, 0, 0, 3, 6, 72, }, { 5, 0, 0, 3, 6, 36, }, { 6, 0, 0, 3, 6, 76, }, { 7, 0, 0, 3, 6, 36, }, @@ -42261,7 +42261,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 7, 36, }, { 1, 0, 0, 3, 7, 66, }, { 3, 0, 0, 3, 7, 76, }, - { 4, 0, 0, 3, 7, 70, }, + { 4, 0, 0, 3, 7, 72, }, { 5, 0, 0, 3, 7, 36, }, { 6, 0, 0, 3, 7, 76, }, { 7, 0, 0, 3, 7, 36, }, @@ -42271,7 +42271,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 8, 36, }, { 1, 0, 0, 3, 8, 66, }, { 3, 0, 0, 3, 8, 68, }, - { 4, 0, 0, 3, 8, 70, }, + { 4, 0, 0, 3, 8, 72, }, { 5, 0, 0, 3, 8, 36, }, { 6, 0, 0, 3, 8, 68, }, { 7, 0, 0, 3, 8, 36, }, @@ -42281,7 +42281,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 9, 36, }, { 1, 0, 0, 3, 9, 66, }, { 3, 0, 0, 3, 9, 64, }, - { 4, 0, 0, 3, 9, 70, }, + { 4, 0, 0, 3, 9, 72, }, { 5, 0, 0, 3, 9, 36, }, { 6, 0, 0, 3, 9, 64, }, { 7, 0, 0, 3, 9, 36, }, @@ -42291,7 +42291,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 10, 36, }, { 1, 0, 0, 3, 10, 66, }, { 3, 0, 0, 3, 10, 60, }, - { 4, 0, 0, 3, 10, 70, }, + { 4, 0, 0, 3, 10, 72, }, { 5, 0, 0, 3, 10, 36, }, { 6, 0, 0, 3, 10, 60, }, { 7, 0, 0, 3, 10, 36, }, @@ -42301,7 +42301,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 11, 36, }, { 1, 0, 0, 3, 11, 66, }, { 3, 0, 0, 3, 11, 52, }, - { 4, 0, 0, 3, 11, 70, }, + { 4, 0, 0, 3, 11, 72, }, { 5, 0, 0, 3, 11, 36, }, { 6, 0, 0, 3, 11, 52, }, { 7, 0, 0, 3, 11, 36, }, @@ -42311,7 +42311,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 12, 36, }, { 1, 0, 0, 3, 12, 66, }, { 3, 0, 0, 3, 12, 40, }, - { 4, 0, 0, 3, 12, 70, }, + { 4, 0, 0, 3, 12, 72, }, { 5, 0, 0, 3, 12, 36, }, { 6, 0, 0, 3, 12, 40, }, { 7, 0, 0, 3, 12, 36, }, @@ -42321,7 +42321,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 13, 36, }, { 1, 0, 0, 3, 13, 66, }, { 3, 0, 0, 3, 13, 28, }, - { 4, 0, 0, 3, 13, 62, }, + { 4, 0, 0, 3, 13, 68, }, { 5, 0, 0, 3, 13, 36, }, { 6, 0, 0, 3, 13, 28, }, { 7, 0, 0, 3, 13, 36, 
}, @@ -42501,7 +42501,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 1, 3, 3, 36, }, { 1, 0, 1, 3, 3, 66, }, { 3, 0, 1, 3, 3, 48, }, - { 4, 0, 1, 3, 3, 66, }, + { 4, 0, 1, 3, 3, 68, }, { 5, 0, 1, 3, 3, 36, }, { 6, 0, 1, 3, 3, 48, }, { 7, 0, 1, 3, 3, 36, }, @@ -42618,137 +42618,137 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 0, 1, 3, 14, 127, }, { 9, 0, 1, 3, 14, 127, }, { 0, 1, 0, 1, 36, 74, }, - { 2, 1, 0, 1, 36, 62, }, - { 1, 1, 0, 1, 36, 60, }, + { 2, 1, 0, 1, 36, 58, }, + { 1, 1, 0, 1, 36, 62, }, { 3, 1, 0, 1, 36, 62, }, - { 4, 1, 0, 1, 36, 76, }, - { 5, 1, 0, 1, 36, 62, }, + { 4, 1, 0, 1, 36, 74, }, + { 5, 1, 0, 1, 36, 58, }, { 6, 1, 0, 1, 36, 64, }, { 7, 1, 0, 1, 36, 54, }, { 8, 1, 0, 1, 36, 62, }, { 9, 1, 0, 1, 36, 62, }, { 0, 1, 0, 1, 40, 76, }, - { 2, 1, 0, 1, 40, 62, }, + { 2, 1, 0, 1, 40, 58, }, { 1, 1, 0, 1, 40, 62, }, { 3, 1, 0, 1, 40, 62, }, { 4, 1, 0, 1, 40, 76, }, - { 5, 1, 0, 1, 40, 62, }, + { 5, 1, 0, 1, 40, 58, }, { 6, 1, 0, 1, 40, 64, }, { 7, 1, 0, 1, 40, 54, }, { 8, 1, 0, 1, 40, 62, }, { 9, 1, 0, 1, 40, 62, }, { 0, 1, 0, 1, 44, 76, }, - { 2, 1, 0, 1, 44, 62, }, + { 2, 1, 0, 1, 44, 58, }, { 1, 1, 0, 1, 44, 62, }, { 3, 1, 0, 1, 44, 62, }, { 4, 1, 0, 1, 44, 76, }, - { 5, 1, 0, 1, 44, 62, }, + { 5, 1, 0, 1, 44, 58, }, { 6, 1, 0, 1, 44, 64, }, { 7, 1, 0, 1, 44, 54, }, { 8, 1, 0, 1, 44, 62, }, { 9, 1, 0, 1, 44, 62, }, { 0, 1, 0, 1, 48, 76, }, - { 2, 1, 0, 1, 48, 62, }, + { 2, 1, 0, 1, 48, 58, }, { 1, 1, 0, 1, 48, 62, }, { 3, 1, 0, 1, 48, 62, }, - { 4, 1, 0, 1, 48, 54, }, - { 5, 1, 0, 1, 48, 62, }, + { 4, 1, 0, 1, 48, 58, }, + { 5, 1, 0, 1, 48, 58, }, { 6, 1, 0, 1, 48, 64, }, { 7, 1, 0, 1, 48, 54, }, { 8, 1, 0, 1, 48, 62, }, { 9, 1, 0, 1, 48, 62, }, { 0, 1, 0, 1, 52, 76, }, - { 2, 1, 0, 1, 52, 62, }, + { 2, 1, 0, 1, 52, 58, }, { 1, 1, 0, 1, 52, 62, }, { 3, 1, 0, 1, 52, 64, }, { 4, 1, 0, 1, 52, 76, }, - { 5, 1, 0, 1, 52, 62, }, + { 5, 1, 0, 1, 52, 58, }, { 6, 1, 0, 1, 52, 76, }, { 7, 1, 0, 1, 52, 54, }, { 8, 1, 0, 1, 52, 76, }, { 9, 1, 0, 1, 52, 62, }, { 0, 1, 0, 1, 56, 76, }, - { 2, 1, 0, 1, 56, 62, }, + { 2, 1, 0, 1, 56, 58, }, { 1, 1, 0, 1, 56, 62, }, { 3, 1, 0, 1, 56, 64, }, { 4, 1, 0, 1, 56, 76, }, - { 5, 1, 0, 1, 56, 62, }, + { 5, 1, 0, 1, 56, 58, }, { 6, 1, 0, 1, 56, 76, }, { 7, 1, 0, 1, 56, 54, }, { 8, 1, 0, 1, 56, 76, }, { 9, 1, 0, 1, 56, 62, }, { 0, 1, 0, 1, 60, 76, }, - { 2, 1, 0, 1, 60, 62, }, + { 2, 1, 0, 1, 60, 58, }, { 1, 1, 0, 1, 60, 62, }, { 3, 1, 0, 1, 60, 64, }, { 4, 1, 0, 1, 60, 76, }, - { 5, 1, 0, 1, 60, 62, }, + { 5, 1, 0, 1, 60, 58, }, { 6, 1, 0, 1, 60, 76, }, { 7, 1, 0, 1, 60, 54, }, { 8, 1, 0, 1, 60, 76, }, { 9, 1, 0, 1, 60, 62, }, - { 0, 1, 0, 1, 64, 74, }, - { 2, 1, 0, 1, 64, 62, }, - { 1, 1, 0, 1, 64, 60, }, + { 0, 1, 0, 1, 64, 76, }, + { 2, 1, 0, 1, 64, 58, }, + { 1, 1, 0, 1, 64, 62, }, { 3, 1, 0, 1, 64, 64, }, { 4, 1, 0, 1, 64, 76, }, - { 5, 1, 0, 1, 64, 62, }, + { 5, 1, 0, 1, 64, 58, }, { 6, 1, 0, 1, 64, 74, }, { 7, 1, 0, 1, 64, 54, }, { 8, 1, 0, 1, 64, 74, }, { 9, 1, 0, 1, 64, 62, }, - { 0, 1, 0, 1, 100, 72, }, - { 2, 1, 0, 1, 100, 62, }, + { 0, 1, 0, 1, 100, 68, }, + { 2, 1, 0, 1, 100, 58, }, { 1, 1, 0, 1, 100, 76, }, - { 3, 1, 0, 1, 100, 72, }, + { 3, 1, 0, 1, 100, 68, }, { 4, 1, 0, 1, 100, 76, }, - { 5, 1, 0, 1, 100, 62, }, + { 5, 1, 0, 1, 100, 58, }, { 6, 1, 0, 1, 100, 72, }, { 7, 1, 0, 1, 100, 54, }, { 8, 1, 0, 1, 100, 72, }, { 9, 1, 0, 1, 100, 127, }, { 0, 1, 0, 1, 104, 76, }, - { 2, 1, 0, 1, 104, 62, }, + { 2, 1, 0, 1, 104, 58, }, { 1, 1, 0, 1, 104, 76, }, { 3, 
1, 0, 1, 104, 76, }, { 4, 1, 0, 1, 104, 76, }, - { 5, 1, 0, 1, 104, 62, }, + { 5, 1, 0, 1, 104, 58, }, { 6, 1, 0, 1, 104, 76, }, { 7, 1, 0, 1, 104, 54, }, { 8, 1, 0, 1, 104, 76, }, { 9, 1, 0, 1, 104, 127, }, { 0, 1, 0, 1, 108, 76, }, - { 2, 1, 0, 1, 108, 62, }, + { 2, 1, 0, 1, 108, 58, }, { 1, 1, 0, 1, 108, 76, }, { 3, 1, 0, 1, 108, 76, }, { 4, 1, 0, 1, 108, 76, }, - { 5, 1, 0, 1, 108, 62, }, + { 5, 1, 0, 1, 108, 58, }, { 6, 1, 0, 1, 108, 76, }, { 7, 1, 0, 1, 108, 54, }, { 8, 1, 0, 1, 108, 76, }, { 9, 1, 0, 1, 108, 127, }, { 0, 1, 0, 1, 112, 76, }, - { 2, 1, 0, 1, 112, 62, }, + { 2, 1, 0, 1, 112, 58, }, { 1, 1, 0, 1, 112, 76, }, { 3, 1, 0, 1, 112, 76, }, { 4, 1, 0, 1, 112, 76, }, - { 5, 1, 0, 1, 112, 62, }, + { 5, 1, 0, 1, 112, 58, }, { 6, 1, 0, 1, 112, 76, }, { 7, 1, 0, 1, 112, 54, }, { 8, 1, 0, 1, 112, 76, }, { 9, 1, 0, 1, 112, 127, }, { 0, 1, 0, 1, 116, 76, }, - { 2, 1, 0, 1, 116, 62, }, + { 2, 1, 0, 1, 116, 58, }, { 1, 1, 0, 1, 116, 76, }, { 3, 1, 0, 1, 116, 76, }, { 4, 1, 0, 1, 116, 76, }, - { 5, 1, 0, 1, 116, 62, }, + { 5, 1, 0, 1, 116, 58, }, { 6, 1, 0, 1, 116, 76, }, { 7, 1, 0, 1, 116, 54, }, { 8, 1, 0, 1, 116, 76, }, { 9, 1, 0, 1, 116, 127, }, { 0, 1, 0, 1, 120, 76, }, - { 2, 1, 0, 1, 120, 62, }, + { 2, 1, 0, 1, 120, 58, }, { 1, 1, 0, 1, 120, 76, }, { 3, 1, 0, 1, 120, 127, }, { 4, 1, 0, 1, 120, 76, }, @@ -42758,7 +42758,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 120, 76, }, { 9, 1, 0, 1, 120, 127, }, { 0, 1, 0, 1, 124, 76, }, - { 2, 1, 0, 1, 124, 62, }, + { 2, 1, 0, 1, 124, 58, }, { 1, 1, 0, 1, 124, 76, }, { 3, 1, 0, 1, 124, 127, }, { 4, 1, 0, 1, 124, 76, }, @@ -42768,7 +42768,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 124, 76, }, { 9, 1, 0, 1, 124, 127, }, { 0, 1, 0, 1, 128, 76, }, - { 2, 1, 0, 1, 128, 62, }, + { 2, 1, 0, 1, 128, 58, }, { 1, 1, 0, 1, 128, 76, }, { 3, 1, 0, 1, 128, 127, }, { 4, 1, 0, 1, 128, 76, }, @@ -42778,38 +42778,38 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 128, 76, }, { 9, 1, 0, 1, 128, 127, }, { 0, 1, 0, 1, 132, 76, }, - { 2, 1, 0, 1, 132, 62, }, + { 2, 1, 0, 1, 132, 58, }, { 1, 1, 0, 1, 132, 76, }, { 3, 1, 0, 1, 132, 76, }, { 4, 1, 0, 1, 132, 76, }, - { 5, 1, 0, 1, 132, 62, }, + { 5, 1, 0, 1, 132, 58, }, { 6, 1, 0, 1, 132, 76, }, { 7, 1, 0, 1, 132, 54, }, { 8, 1, 0, 1, 132, 76, }, { 9, 1, 0, 1, 132, 127, }, { 0, 1, 0, 1, 136, 76, }, - { 2, 1, 0, 1, 136, 62, }, + { 2, 1, 0, 1, 136, 58, }, { 1, 1, 0, 1, 136, 76, }, { 3, 1, 0, 1, 136, 76, }, { 4, 1, 0, 1, 136, 76, }, - { 5, 1, 0, 1, 136, 62, }, + { 5, 1, 0, 1, 136, 58, }, { 6, 1, 0, 1, 136, 76, }, { 7, 1, 0, 1, 136, 54, }, { 8, 1, 0, 1, 136, 76, }, { 9, 1, 0, 1, 136, 127, }, - { 0, 1, 0, 1, 140, 72, }, - { 2, 1, 0, 1, 140, 62, }, + { 0, 1, 0, 1, 140, 74, }, + { 2, 1, 0, 1, 140, 58, }, { 1, 1, 0, 1, 140, 76, }, - { 3, 1, 0, 1, 140, 72, }, + { 3, 1, 0, 1, 140, 74, }, { 4, 1, 0, 1, 140, 76, }, - { 5, 1, 0, 1, 140, 62, }, + { 5, 1, 0, 1, 140, 58, }, { 6, 1, 0, 1, 140, 72, }, { 7, 1, 0, 1, 140, 54, }, { 8, 1, 0, 1, 140, 72, }, { 9, 1, 0, 1, 140, 127, }, { 0, 1, 0, 1, 144, 76, }, { 2, 1, 0, 1, 144, 127, }, - { 1, 1, 0, 1, 144, 127, }, + { 1, 1, 0, 1, 144, 76, }, { 3, 1, 0, 1, 144, 76, }, { 4, 1, 0, 1, 144, 76, }, { 5, 1, 0, 1, 144, 127, }, @@ -42818,7 +42818,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, { 0, 1, 0, 1, 149, 76, }, - { 2, 1, 0, 1, 149, -128, }, + { 2, 1, 0, 1, 149, 28, 
}, { 1, 1, 0, 1, 149, 127, }, { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, @@ -42826,9 +42826,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 149, 76, }, { 7, 1, 0, 1, 149, 54, }, { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, -128, }, + { 9, 1, 0, 1, 149, 28, }, { 0, 1, 0, 1, 153, 76, }, - { 2, 1, 0, 1, 153, -128, }, + { 2, 1, 0, 1, 153, 28, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, @@ -42836,9 +42836,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 153, 76, }, { 7, 1, 0, 1, 153, 54, }, { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, -128, }, + { 9, 1, 0, 1, 153, 28, }, { 0, 1, 0, 1, 157, 76, }, - { 2, 1, 0, 1, 157, -128, }, + { 2, 1, 0, 1, 157, 28, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, @@ -42846,9 +42846,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 157, 76, }, { 7, 1, 0, 1, 157, 54, }, { 8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, -128, }, + { 9, 1, 0, 1, 157, 28, }, { 0, 1, 0, 1, 161, 76, }, - { 2, 1, 0, 1, 161, -128, }, + { 2, 1, 0, 1, 161, 28, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, @@ -42856,9 +42856,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 161, 76, }, { 7, 1, 0, 1, 161, 54, }, { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, -128, }, + { 9, 1, 0, 1, 161, 28, }, { 0, 1, 0, 1, 165, 76, }, - { 2, 1, 0, 1, 165, -128, }, + { 2, 1, 0, 1, 165, 28, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, @@ -42866,139 +42866,139 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 165, 76, }, { 7, 1, 0, 1, 165, 54, }, { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, -128, }, - { 0, 1, 0, 2, 36, 72, }, - { 2, 1, 0, 2, 36, 62, }, - { 1, 1, 0, 2, 36, 62, }, + { 9, 1, 0, 1, 165, 28, }, + { 0, 1, 0, 2, 36, 70, }, + { 2, 1, 0, 2, 36, 58, }, + { 1, 1, 0, 2, 36, 64, }, { 3, 1, 0, 2, 36, 62, }, { 4, 1, 0, 2, 36, 76, }, - { 5, 1, 0, 2, 36, 62, }, + { 5, 1, 0, 2, 36, 58, }, { 6, 1, 0, 2, 36, 64, }, { 7, 1, 0, 2, 36, 54, }, { 8, 1, 0, 2, 36, 62, }, { 9, 1, 0, 2, 36, 62, }, { 0, 1, 0, 2, 40, 76, }, - { 2, 1, 0, 2, 40, 62, }, + { 2, 1, 0, 2, 40, 58, }, { 1, 1, 0, 2, 40, 62, }, { 3, 1, 0, 2, 40, 62, }, { 4, 1, 0, 2, 40, 76, }, - { 5, 1, 0, 2, 40, 62, }, + { 5, 1, 0, 2, 40, 58, }, { 6, 1, 0, 2, 40, 64, }, { 7, 1, 0, 2, 40, 54, }, { 8, 1, 0, 2, 40, 62, }, { 9, 1, 0, 2, 40, 62, }, { 0, 1, 0, 2, 44, 76, }, - { 2, 1, 0, 2, 44, 62, }, + { 2, 1, 0, 2, 44, 58, }, { 1, 1, 0, 2, 44, 62, }, { 3, 1, 0, 2, 44, 62, }, { 4, 1, 0, 2, 44, 76, }, - { 5, 1, 0, 2, 44, 62, }, + { 5, 1, 0, 2, 44, 58, }, { 6, 1, 0, 2, 44, 64, }, { 7, 1, 0, 2, 44, 54, }, { 8, 1, 0, 2, 44, 62, }, { 9, 1, 0, 2, 44, 62, }, { 0, 1, 0, 2, 48, 76, }, - { 2, 1, 0, 2, 48, 62, }, + { 2, 1, 0, 2, 48, 58, }, { 1, 1, 0, 2, 48, 62, }, { 3, 1, 0, 2, 48, 62, }, - { 4, 1, 0, 2, 48, 54, }, - { 5, 1, 0, 2, 48, 62, }, + { 4, 1, 0, 2, 48, 58, }, + { 5, 1, 0, 2, 48, 58, }, { 6, 1, 0, 2, 48, 64, }, { 7, 1, 0, 2, 48, 54, }, { 8, 1, 0, 2, 48, 62, }, { 9, 1, 0, 2, 48, 62, }, { 0, 1, 0, 2, 52, 76, }, - { 2, 1, 0, 2, 52, 62, }, + { 2, 1, 0, 2, 52, 58, }, { 1, 1, 0, 2, 52, 62, }, { 3, 1, 0, 2, 52, 64, }, { 4, 1, 0, 2, 52, 76, }, - { 5, 1, 0, 2, 52, 62, }, + { 5, 1, 0, 2, 52, 58, }, { 6, 1, 0, 2, 52, 76, }, { 7, 1, 0, 2, 52, 54, }, { 8, 1, 0, 2, 52, 76, }, { 9, 1, 0, 2, 52, 62, }, { 0, 1, 0, 2, 56, 
76, }, - { 2, 1, 0, 2, 56, 62, }, + { 2, 1, 0, 2, 56, 58, }, { 1, 1, 0, 2, 56, 62, }, { 3, 1, 0, 2, 56, 64, }, { 4, 1, 0, 2, 56, 76, }, - { 5, 1, 0, 2, 56, 62, }, + { 5, 1, 0, 2, 56, 58, }, { 6, 1, 0, 2, 56, 76, }, { 7, 1, 0, 2, 56, 54, }, { 8, 1, 0, 2, 56, 76, }, { 9, 1, 0, 2, 56, 62, }, { 0, 1, 0, 2, 60, 76, }, - { 2, 1, 0, 2, 60, 62, }, + { 2, 1, 0, 2, 60, 58, }, { 1, 1, 0, 2, 60, 62, }, { 3, 1, 0, 2, 60, 64, }, { 4, 1, 0, 2, 60, 76, }, - { 5, 1, 0, 2, 60, 62, }, + { 5, 1, 0, 2, 60, 58, }, { 6, 1, 0, 2, 60, 76, }, { 7, 1, 0, 2, 60, 54, }, { 8, 1, 0, 2, 60, 76, }, { 9, 1, 0, 2, 60, 62, }, - { 0, 1, 0, 2, 64, 74, }, - { 2, 1, 0, 2, 64, 62, }, - { 1, 1, 0, 2, 64, 60, }, + { 0, 1, 0, 2, 64, 70, }, + { 2, 1, 0, 2, 64, 58, }, + { 1, 1, 0, 2, 64, 62, }, { 3, 1, 0, 2, 64, 64, }, { 4, 1, 0, 2, 64, 74, }, - { 5, 1, 0, 2, 64, 62, }, + { 5, 1, 0, 2, 64, 58, }, { 6, 1, 0, 2, 64, 74, }, { 7, 1, 0, 2, 64, 54, }, { 8, 1, 0, 2, 64, 74, }, { 9, 1, 0, 2, 64, 62, }, - { 0, 1, 0, 2, 100, 70, }, - { 2, 1, 0, 2, 100, 62, }, + { 0, 1, 0, 2, 100, 66, }, + { 2, 1, 0, 2, 100, 58, }, { 1, 1, 0, 2, 100, 76, }, - { 3, 1, 0, 2, 100, 70, }, + { 3, 1, 0, 2, 100, 66, }, { 4, 1, 0, 2, 100, 76, }, - { 5, 1, 0, 2, 100, 62, }, + { 5, 1, 0, 2, 100, 58, }, { 6, 1, 0, 2, 100, 70, }, { 7, 1, 0, 2, 100, 54, }, { 8, 1, 0, 2, 100, 70, }, { 9, 1, 0, 2, 100, 127, }, { 0, 1, 0, 2, 104, 76, }, - { 2, 1, 0, 2, 104, 62, }, + { 2, 1, 0, 2, 104, 58, }, { 1, 1, 0, 2, 104, 76, }, { 3, 1, 0, 2, 104, 76, }, { 4, 1, 0, 2, 104, 76, }, - { 5, 1, 0, 2, 104, 62, }, + { 5, 1, 0, 2, 104, 58, }, { 6, 1, 0, 2, 104, 76, }, { 7, 1, 0, 2, 104, 54, }, { 8, 1, 0, 2, 104, 76, }, { 9, 1, 0, 2, 104, 127, }, { 0, 1, 0, 2, 108, 76, }, - { 2, 1, 0, 2, 108, 62, }, + { 2, 1, 0, 2, 108, 58, }, { 1, 1, 0, 2, 108, 76, }, { 3, 1, 0, 2, 108, 76, }, { 4, 1, 0, 2, 108, 76, }, - { 5, 1, 0, 2, 108, 62, }, + { 5, 1, 0, 2, 108, 58, }, { 6, 1, 0, 2, 108, 76, }, { 7, 1, 0, 2, 108, 54, }, { 8, 1, 0, 2, 108, 76, }, { 9, 1, 0, 2, 108, 127, }, { 0, 1, 0, 2, 112, 76, }, - { 2, 1, 0, 2, 112, 62, }, + { 2, 1, 0, 2, 112, 58, }, { 1, 1, 0, 2, 112, 76, }, { 3, 1, 0, 2, 112, 76, }, { 4, 1, 0, 2, 112, 76, }, - { 5, 1, 0, 2, 112, 62, }, + { 5, 1, 0, 2, 112, 58, }, { 6, 1, 0, 2, 112, 76, }, { 7, 1, 0, 2, 112, 54, }, { 8, 1, 0, 2, 112, 76, }, { 9, 1, 0, 2, 112, 127, }, { 0, 1, 0, 2, 116, 76, }, - { 2, 1, 0, 2, 116, 62, }, + { 2, 1, 0, 2, 116, 58, }, { 1, 1, 0, 2, 116, 76, }, { 3, 1, 0, 2, 116, 76, }, { 4, 1, 0, 2, 116, 76, }, - { 5, 1, 0, 2, 116, 62, }, + { 5, 1, 0, 2, 116, 58, }, { 6, 1, 0, 2, 116, 76, }, { 7, 1, 0, 2, 116, 54, }, { 8, 1, 0, 2, 116, 76, }, { 9, 1, 0, 2, 116, 127, }, { 0, 1, 0, 2, 120, 76, }, - { 2, 1, 0, 2, 120, 62, }, + { 2, 1, 0, 2, 120, 58, }, { 1, 1, 0, 2, 120, 76, }, { 3, 1, 0, 2, 120, 127, }, { 4, 1, 0, 2, 120, 76, }, @@ -43008,7 +43008,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 120, 76, }, { 9, 1, 0, 2, 120, 127, }, { 0, 1, 0, 2, 124, 76, }, - { 2, 1, 0, 2, 124, 62, }, + { 2, 1, 0, 2, 124, 58, }, { 1, 1, 0, 2, 124, 76, }, { 3, 1, 0, 2, 124, 127, }, { 4, 1, 0, 2, 124, 76, }, @@ -43018,7 +43018,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 124, 76, }, { 9, 1, 0, 2, 124, 127, }, { 0, 1, 0, 2, 128, 76, }, - { 2, 1, 0, 2, 128, 62, }, + { 2, 1, 0, 2, 128, 58, }, { 1, 1, 0, 2, 128, 76, }, { 3, 1, 0, 2, 128, 127, }, { 4, 1, 0, 2, 128, 76, }, @@ -43028,38 +43028,38 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 128, 76, }, { 9, 1, 0, 2, 
128, 127, }, { 0, 1, 0, 2, 132, 76, }, - { 2, 1, 0, 2, 132, 62, }, + { 2, 1, 0, 2, 132, 58, }, { 1, 1, 0, 2, 132, 76, }, { 3, 1, 0, 2, 132, 76, }, { 4, 1, 0, 2, 132, 76, }, - { 5, 1, 0, 2, 132, 62, }, + { 5, 1, 0, 2, 132, 58, }, { 6, 1, 0, 2, 132, 76, }, { 7, 1, 0, 2, 132, 54, }, { 8, 1, 0, 2, 132, 76, }, { 9, 1, 0, 2, 132, 127, }, { 0, 1, 0, 2, 136, 76, }, - { 2, 1, 0, 2, 136, 62, }, + { 2, 1, 0, 2, 136, 58, }, { 1, 1, 0, 2, 136, 76, }, { 3, 1, 0, 2, 136, 76, }, { 4, 1, 0, 2, 136, 76, }, - { 5, 1, 0, 2, 136, 62, }, + { 5, 1, 0, 2, 136, 58, }, { 6, 1, 0, 2, 136, 76, }, { 7, 1, 0, 2, 136, 54, }, { 8, 1, 0, 2, 136, 76, }, { 9, 1, 0, 2, 136, 127, }, - { 0, 1, 0, 2, 140, 70, }, - { 2, 1, 0, 2, 140, 62, }, + { 0, 1, 0, 2, 140, 66, }, + { 2, 1, 0, 2, 140, 58, }, { 1, 1, 0, 2, 140, 76, }, - { 3, 1, 0, 2, 140, 70, }, + { 3, 1, 0, 2, 140, 66, }, { 4, 1, 0, 2, 140, 76, }, - { 5, 1, 0, 2, 140, 62, }, + { 5, 1, 0, 2, 140, 58, }, { 6, 1, 0, 2, 140, 70, }, { 7, 1, 0, 2, 140, 54, }, { 8, 1, 0, 2, 140, 70, }, { 9, 1, 0, 2, 140, 127, }, { 0, 1, 0, 2, 144, 76, }, { 2, 1, 0, 2, 144, 127, }, - { 1, 1, 0, 2, 144, 127, }, + { 1, 1, 0, 2, 144, 76, }, { 3, 1, 0, 2, 144, 76, }, { 4, 1, 0, 2, 144, 76, }, { 5, 1, 0, 2, 144, 127, }, @@ -43068,7 +43068,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, { 0, 1, 0, 2, 149, 76, }, - { 2, 1, 0, 2, 149, -128, }, + { 2, 1, 0, 2, 149, 28, }, { 1, 1, 0, 2, 149, 127, }, { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, @@ -43076,9 +43076,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 149, 76, }, { 7, 1, 0, 2, 149, 54, }, { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, -128, }, + { 9, 1, 0, 2, 149, 28, }, { 0, 1, 0, 2, 153, 76, }, - { 2, 1, 0, 2, 153, -128, }, + { 2, 1, 0, 2, 153, 28, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, @@ -43086,9 +43086,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 153, 76, }, { 7, 1, 0, 2, 153, 54, }, { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, -128, }, + { 9, 1, 0, 2, 153, 28, }, { 0, 1, 0, 2, 157, 76, }, - { 2, 1, 0, 2, 157, -128, }, + { 2, 1, 0, 2, 157, 28, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, @@ -43096,9 +43096,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 157, 76, }, { 7, 1, 0, 2, 157, 54, }, { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, -128, }, + { 9, 1, 0, 2, 157, 28, }, { 0, 1, 0, 2, 161, 76, }, - { 2, 1, 0, 2, 161, -128, }, + { 2, 1, 0, 2, 161, 28, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, @@ -43106,9 +43106,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 161, 76, }, { 7, 1, 0, 2, 161, 54, }, { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, -128, }, + { 9, 1, 0, 2, 161, 28, }, { 0, 1, 0, 2, 165, 76, }, - { 2, 1, 0, 2, 165, -128, }, + { 2, 1, 0, 2, 165, 28, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, @@ -43116,262 +43116,262 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 165, 76, }, { 7, 1, 0, 2, 165, 54, }, { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, -128, }, - { 0, 1, 0, 3, 36, 68, }, - { 2, 1, 0, 3, 36, 38, }, + { 9, 1, 0, 2, 165, 28, }, + { 0, 1, 0, 3, 36, 64, }, + { 2, 1, 0, 3, 36, 36, }, { 1, 1, 0, 3, 36, 50, }, { 3, 1, 0, 3, 36, 38, }, { 4, 1, 0, 3, 36, 66, }, - { 5, 
1, 0, 3, 36, 38, }, + { 5, 1, 0, 3, 36, 36, }, { 6, 1, 0, 3, 36, 52, }, { 7, 1, 0, 3, 36, 30, }, { 8, 1, 0, 3, 36, 50, }, { 9, 1, 0, 3, 36, 38, }, { 0, 1, 0, 3, 40, 68, }, - { 2, 1, 0, 3, 40, 38, }, + { 2, 1, 0, 3, 40, 36, }, { 1, 1, 0, 3, 40, 50, }, { 3, 1, 0, 3, 40, 38, }, { 4, 1, 0, 3, 40, 66, }, - { 5, 1, 0, 3, 40, 38, }, + { 5, 1, 0, 3, 40, 36, }, { 6, 1, 0, 3, 40, 52, }, { 7, 1, 0, 3, 40, 30, }, { 8, 1, 0, 3, 40, 50, }, { 9, 1, 0, 3, 40, 38, }, { 0, 1, 0, 3, 44, 68, }, - { 2, 1, 0, 3, 44, 38, }, + { 2, 1, 0, 3, 44, 36, }, { 1, 1, 0, 3, 44, 50, }, { 3, 1, 0, 3, 44, 38, }, { 4, 1, 0, 3, 44, 66, }, - { 5, 1, 0, 3, 44, 38, }, + { 5, 1, 0, 3, 44, 36, }, { 6, 1, 0, 3, 44, 52, }, { 7, 1, 0, 3, 44, 30, }, { 8, 1, 0, 3, 44, 50, }, { 9, 1, 0, 3, 44, 38, }, { 0, 1, 0, 3, 48, 68, }, - { 2, 1, 0, 3, 48, 38, }, + { 2, 1, 0, 3, 48, 36, }, { 1, 1, 0, 3, 48, 50, }, { 3, 1, 0, 3, 48, 38, }, - { 4, 1, 0, 3, 48, 36, }, - { 5, 1, 0, 3, 48, 38, }, + { 4, 1, 0, 3, 48, 42, }, + { 5, 1, 0, 3, 48, 36, }, { 6, 1, 0, 3, 48, 52, }, { 7, 1, 0, 3, 48, 30, }, { 8, 1, 0, 3, 48, 50, }, { 9, 1, 0, 3, 48, 38, }, { 0, 1, 0, 3, 52, 68, }, - { 2, 1, 0, 3, 52, 38, }, + { 2, 1, 0, 3, 52, 36, }, { 1, 1, 0, 3, 52, 50, }, { 3, 1, 0, 3, 52, 40, }, { 4, 1, 0, 3, 52, 66, }, - { 5, 1, 0, 3, 52, 38, }, + { 5, 1, 0, 3, 52, 36, }, { 6, 1, 0, 3, 52, 68, }, { 7, 1, 0, 3, 52, 30, }, { 8, 1, 0, 3, 52, 68, }, { 9, 1, 0, 3, 52, 38, }, { 0, 1, 0, 3, 56, 68, }, - { 2, 1, 0, 3, 56, 38, }, + { 2, 1, 0, 3, 56, 36, }, { 1, 1, 0, 3, 56, 50, }, { 3, 1, 0, 3, 56, 40, }, { 4, 1, 0, 3, 56, 66, }, - { 5, 1, 0, 3, 56, 38, }, + { 5, 1, 0, 3, 56, 36, }, { 6, 1, 0, 3, 56, 68, }, { 7, 1, 0, 3, 56, 30, }, { 8, 1, 0, 3, 56, 68, }, { 9, 1, 0, 3, 56, 38, }, - { 0, 1, 0, 3, 60, 66, }, - { 2, 1, 0, 3, 60, 38, }, + { 0, 1, 0, 3, 60, 68, }, + { 2, 1, 0, 3, 60, 36, }, { 1, 1, 0, 3, 60, 50, }, { 3, 1, 0, 3, 60, 40, }, { 4, 1, 0, 3, 60, 66, }, - { 5, 1, 0, 3, 60, 38, }, + { 5, 1, 0, 3, 60, 36, }, { 6, 1, 0, 3, 60, 66, }, { 7, 1, 0, 3, 60, 30, }, { 8, 1, 0, 3, 60, 66, }, { 9, 1, 0, 3, 60, 38, }, - { 0, 1, 0, 3, 64, 68, }, - { 2, 1, 0, 3, 64, 38, }, + { 0, 1, 0, 3, 64, 66, }, + { 2, 1, 0, 3, 64, 36, }, { 1, 1, 0, 3, 64, 50, }, { 3, 1, 0, 3, 64, 40, }, { 4, 1, 0, 3, 64, 66, }, - { 5, 1, 0, 3, 64, 38, }, + { 5, 1, 0, 3, 64, 36, }, { 6, 1, 0, 3, 64, 68, }, { 7, 1, 0, 3, 64, 30, }, { 8, 1, 0, 3, 64, 68, }, { 9, 1, 0, 3, 64, 38, }, - { 0, 1, 0, 3, 100, 60, }, - { 2, 1, 0, 3, 100, 38, }, + { 0, 1, 0, 3, 100, 64, }, + { 2, 1, 0, 3, 100, 36, }, { 1, 1, 0, 3, 100, 70, }, - { 3, 1, 0, 3, 100, 60, }, - { 4, 1, 0, 3, 100, 64, }, - { 5, 1, 0, 3, 100, 38, }, + { 3, 1, 0, 3, 100, 64, }, + { 4, 1, 0, 3, 100, 66, }, + { 5, 1, 0, 3, 100, 36, }, { 6, 1, 0, 3, 100, 60, }, { 7, 1, 0, 3, 100, 30, }, { 8, 1, 0, 3, 100, 60, }, { 9, 1, 0, 3, 100, 127, }, { 0, 1, 0, 3, 104, 68, }, - { 2, 1, 0, 3, 104, 38, }, + { 2, 1, 0, 3, 104, 36, }, { 1, 1, 0, 3, 104, 70, }, { 3, 1, 0, 3, 104, 68, }, - { 4, 1, 0, 3, 104, 64, }, - { 5, 1, 0, 3, 104, 38, }, + { 4, 1, 0, 3, 104, 66, }, + { 5, 1, 0, 3, 104, 36, }, { 6, 1, 0, 3, 104, 68, }, { 7, 1, 0, 3, 104, 30, }, { 8, 1, 0, 3, 104, 68, }, { 9, 1, 0, 3, 104, 127, }, { 0, 1, 0, 3, 108, 68, }, - { 2, 1, 0, 3, 108, 38, }, + { 2, 1, 0, 3, 108, 36, }, { 1, 1, 0, 3, 108, 70, }, { 3, 1, 0, 3, 108, 68, }, - { 4, 1, 0, 3, 108, 64, }, - { 5, 1, 0, 3, 108, 38, }, + { 4, 1, 0, 3, 108, 66, }, + { 5, 1, 0, 3, 108, 36, }, { 6, 1, 0, 3, 108, 68, }, { 7, 1, 0, 3, 108, 30, }, { 8, 1, 0, 3, 108, 68, }, { 9, 1, 0, 3, 108, 127, }, { 0, 1, 0, 3, 112, 68, }, - { 2, 1, 0, 3, 112, 38, 
}, + { 2, 1, 0, 3, 112, 36, }, { 1, 1, 0, 3, 112, 70, }, { 3, 1, 0, 3, 112, 68, }, - { 4, 1, 0, 3, 112, 64, }, - { 5, 1, 0, 3, 112, 38, }, + { 4, 1, 0, 3, 112, 66, }, + { 5, 1, 0, 3, 112, 36, }, { 6, 1, 0, 3, 112, 68, }, { 7, 1, 0, 3, 112, 30, }, { 8, 1, 0, 3, 112, 68, }, { 9, 1, 0, 3, 112, 127, }, { 0, 1, 0, 3, 116, 68, }, - { 2, 1, 0, 3, 116, 38, }, + { 2, 1, 0, 3, 116, 36, }, { 1, 1, 0, 3, 116, 70, }, { 3, 1, 0, 3, 116, 68, }, - { 4, 1, 0, 3, 116, 64, }, - { 5, 1, 0, 3, 116, 38, }, + { 4, 1, 0, 3, 116, 66, }, + { 5, 1, 0, 3, 116, 36, }, { 6, 1, 0, 3, 116, 68, }, { 7, 1, 0, 3, 116, 30, }, { 8, 1, 0, 3, 116, 68, }, { 9, 1, 0, 3, 116, 127, }, { 0, 1, 0, 3, 120, 68, }, - { 2, 1, 0, 3, 120, 38, }, + { 2, 1, 0, 3, 120, 36, }, { 1, 1, 0, 3, 120, 70, }, { 3, 1, 0, 3, 120, 127, }, - { 4, 1, 0, 3, 120, 64, }, + { 4, 1, 0, 3, 120, 66, }, { 5, 1, 0, 3, 120, 127, }, { 6, 1, 0, 3, 120, 68, }, { 7, 1, 0, 3, 120, 30, }, { 8, 1, 0, 3, 120, 68, }, { 9, 1, 0, 3, 120, 127, }, { 0, 1, 0, 3, 124, 68, }, - { 2, 1, 0, 3, 124, 38, }, + { 2, 1, 0, 3, 124, 36, }, { 1, 1, 0, 3, 124, 70, }, { 3, 1, 0, 3, 124, 127, }, - { 4, 1, 0, 3, 124, 64, }, + { 4, 1, 0, 3, 124, 66, }, { 5, 1, 0, 3, 124, 127, }, { 6, 1, 0, 3, 124, 68, }, { 7, 1, 0, 3, 124, 30, }, { 8, 1, 0, 3, 124, 68, }, { 9, 1, 0, 3, 124, 127, }, { 0, 1, 0, 3, 128, 68, }, - { 2, 1, 0, 3, 128, 38, }, + { 2, 1, 0, 3, 128, 36, }, { 1, 1, 0, 3, 128, 70, }, { 3, 1, 0, 3, 128, 127, }, - { 4, 1, 0, 3, 128, 64, }, + { 4, 1, 0, 3, 128, 66, }, { 5, 1, 0, 3, 128, 127, }, { 6, 1, 0, 3, 128, 68, }, { 7, 1, 0, 3, 128, 30, }, { 8, 1, 0, 3, 128, 68, }, { 9, 1, 0, 3, 128, 127, }, { 0, 1, 0, 3, 132, 68, }, - { 2, 1, 0, 3, 132, 38, }, + { 2, 1, 0, 3, 132, 36, }, { 1, 1, 0, 3, 132, 70, }, { 3, 1, 0, 3, 132, 68, }, - { 4, 1, 0, 3, 132, 64, }, - { 5, 1, 0, 3, 132, 38, }, + { 4, 1, 0, 3, 132, 66, }, + { 5, 1, 0, 3, 132, 36, }, { 6, 1, 0, 3, 132, 68, }, { 7, 1, 0, 3, 132, 30, }, { 8, 1, 0, 3, 132, 68, }, { 9, 1, 0, 3, 132, 127, }, { 0, 1, 0, 3, 136, 68, }, - { 2, 1, 0, 3, 136, 38, }, + { 2, 1, 0, 3, 136, 36, }, { 1, 1, 0, 3, 136, 70, }, { 3, 1, 0, 3, 136, 68, }, - { 4, 1, 0, 3, 136, 64, }, - { 5, 1, 0, 3, 136, 38, }, + { 4, 1, 0, 3, 136, 66, }, + { 5, 1, 0, 3, 136, 36, }, { 6, 1, 0, 3, 136, 68, }, { 7, 1, 0, 3, 136, 30, }, { 8, 1, 0, 3, 136, 68, }, { 9, 1, 0, 3, 136, 127, }, - { 0, 1, 0, 3, 140, 60, }, - { 2, 1, 0, 3, 140, 38, }, + { 0, 1, 0, 3, 140, 58, }, + { 2, 1, 0, 3, 140, 36, }, { 1, 1, 0, 3, 140, 70, }, - { 3, 1, 0, 3, 140, 60, }, - { 4, 1, 0, 3, 140, 64, }, - { 5, 1, 0, 3, 140, 38, }, + { 3, 1, 0, 3, 140, 58, }, + { 4, 1, 0, 3, 140, 66, }, + { 5, 1, 0, 3, 140, 36, }, { 6, 1, 0, 3, 140, 60, }, { 7, 1, 0, 3, 140, 30, }, { 8, 1, 0, 3, 140, 60, }, { 9, 1, 0, 3, 140, 127, }, { 0, 1, 0, 3, 144, 68, }, { 2, 1, 0, 3, 144, 127, }, - { 1, 1, 0, 3, 144, 127, }, + { 1, 1, 0, 3, 144, 70, }, { 3, 1, 0, 3, 144, 68, }, - { 4, 1, 0, 3, 144, 64, }, + { 4, 1, 0, 3, 144, 66, }, { 5, 1, 0, 3, 144, 127, }, { 6, 1, 0, 3, 144, 68, }, { 7, 1, 0, 3, 144, 127, }, { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, { 0, 1, 0, 3, 149, 76, }, - { 2, 1, 0, 3, 149, -128, }, + { 2, 1, 0, 3, 149, 4, }, { 1, 1, 0, 3, 149, 127, }, { 3, 1, 0, 3, 149, 76, }, - { 4, 1, 0, 3, 149, 60, }, + { 4, 1, 0, 3, 149, 62, }, { 5, 1, 0, 3, 149, 76, }, { 6, 1, 0, 3, 149, 76, }, { 7, 1, 0, 3, 149, 30, }, { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, -128, }, + { 9, 1, 0, 3, 149, 4, }, { 0, 1, 0, 3, 153, 76, }, - { 2, 1, 0, 3, 153, -128, }, + { 2, 1, 0, 3, 153, 4, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, - { 4, 
1, 0, 3, 153, 60, }, + { 4, 1, 0, 3, 153, 62, }, { 5, 1, 0, 3, 153, 76, }, { 6, 1, 0, 3, 153, 76, }, { 7, 1, 0, 3, 153, 30, }, { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, -128, }, + { 9, 1, 0, 3, 153, 4, }, { 0, 1, 0, 3, 157, 76, }, - { 2, 1, 0, 3, 157, -128, }, + { 2, 1, 0, 3, 157, 4, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, - { 4, 1, 0, 3, 157, 60, }, + { 4, 1, 0, 3, 157, 62, }, { 5, 1, 0, 3, 157, 76, }, { 6, 1, 0, 3, 157, 76, }, { 7, 1, 0, 3, 157, 30, }, { 8, 1, 0, 3, 157, 76, }, - { 9, 1, 0, 3, 157, -128, }, + { 9, 1, 0, 3, 157, 4, }, { 0, 1, 0, 3, 161, 76, }, - { 2, 1, 0, 3, 161, -128, }, + { 2, 1, 0, 3, 161, 4, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, - { 4, 1, 0, 3, 161, 60, }, + { 4, 1, 0, 3, 161, 62, }, { 5, 1, 0, 3, 161, 76, }, { 6, 1, 0, 3, 161, 76, }, { 7, 1, 0, 3, 161, 30, }, { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, -128, }, + { 9, 1, 0, 3, 161, 4, }, { 0, 1, 0, 3, 165, 76, }, - { 2, 1, 0, 3, 165, -128, }, + { 2, 1, 0, 3, 165, 4, }, { 1, 1, 0, 3, 165, 127, }, { 3, 1, 0, 3, 165, 76, }, - { 4, 1, 0, 3, 165, 60, }, + { 4, 1, 0, 3, 165, 62, }, { 5, 1, 0, 3, 165, 76, }, { 6, 1, 0, 3, 165, 76, }, { 7, 1, 0, 3, 165, 30, }, { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, -128, }, + { 9, 1, 0, 3, 165, 4, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, - { 1, 1, 1, 2, 38, 62, }, + { 1, 1, 1, 2, 38, 64, }, { 3, 1, 1, 2, 38, 64, }, - { 4, 1, 1, 2, 38, 72, }, + { 4, 1, 1, 2, 38, 64, }, { 5, 1, 1, 2, 38, 64, }, { 6, 1, 1, 2, 38, 64, }, { 7, 1, 1, 2, 38, 54, }, @@ -43379,9 +43379,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 2, 38, 64, }, { 0, 1, 1, 2, 46, 72, }, { 2, 1, 1, 2, 46, 64, }, - { 1, 1, 1, 2, 46, 62, }, + { 1, 1, 1, 2, 46, 64, }, { 3, 1, 1, 2, 46, 64, }, - { 4, 1, 1, 2, 46, 60, }, + { 4, 1, 1, 2, 46, 70, }, { 5, 1, 1, 2, 46, 64, }, { 6, 1, 1, 2, 46, 64, }, { 7, 1, 1, 2, 46, 54, }, @@ -43389,7 +43389,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 2, 46, 64, }, { 0, 1, 1, 2, 54, 72, }, { 2, 1, 1, 2, 54, 64, }, - { 1, 1, 1, 2, 54, 62, }, + { 1, 1, 1, 2, 54, 64, }, { 3, 1, 1, 2, 54, 64, }, { 4, 1, 1, 2, 54, 72, }, { 5, 1, 1, 2, 54, 64, }, @@ -43397,21 +43397,21 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 7, 1, 1, 2, 54, 54, }, { 8, 1, 1, 2, 54, 72, }, { 9, 1, 1, 2, 54, 64, }, - { 0, 1, 1, 2, 62, 64, }, + { 0, 1, 1, 2, 62, 60, }, { 2, 1, 1, 2, 62, 64, }, { 1, 1, 1, 2, 62, 62, }, - { 3, 1, 1, 2, 62, 64, }, - { 4, 1, 1, 2, 62, 70, }, + { 3, 1, 1, 2, 62, 60, }, + { 4, 1, 1, 2, 62, 60, }, { 5, 1, 1, 2, 62, 64, }, { 6, 1, 1, 2, 62, 64, }, { 7, 1, 1, 2, 62, 54, }, { 8, 1, 1, 2, 62, 64, }, { 9, 1, 1, 2, 62, 64, }, - { 0, 1, 1, 2, 102, 58, }, + { 0, 1, 1, 2, 102, 60, }, { 2, 1, 1, 2, 102, 64, }, { 1, 1, 1, 2, 102, 72, }, - { 3, 1, 1, 2, 102, 58, }, - { 4, 1, 1, 2, 102, 72, }, + { 3, 1, 1, 2, 102, 60, }, + { 4, 1, 1, 2, 102, 64, }, { 5, 1, 1, 2, 102, 64, }, { 6, 1, 1, 2, 102, 58, }, { 7, 1, 1, 2, 102, 54, }, @@ -43459,7 +43459,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 2, 134, 127, }, { 0, 1, 1, 2, 142, 72, }, { 2, 1, 1, 2, 142, 127, }, - { 1, 1, 1, 2, 142, 127, }, + { 1, 1, 1, 2, 142, 72, }, { 3, 1, 1, 2, 142, 72, }, { 4, 1, 1, 2, 142, 72, }, { 5, 1, 1, 2, 142, 127, }, @@ -43468,7 +43468,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, { 0, 1, 1, 2, 151, 72, }, - { 2, 1, 1, 2, 151, -128, }, + { 2, 1, 1, 
2, 151, 28, }, { 1, 1, 1, 2, 151, 127, }, { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, @@ -43476,9 +43476,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 2, 151, 72, }, { 7, 1, 1, 2, 151, 54, }, { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, -128, }, + { 9, 1, 1, 2, 151, 28, }, { 0, 1, 1, 2, 159, 72, }, - { 2, 1, 1, 2, 159, -128, }, + { 2, 1, 1, 2, 159, 28, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, @@ -43486,12 +43486,12 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 2, 159, 72, }, { 7, 1, 1, 2, 159, 54, }, { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, -128, }, + { 9, 1, 1, 2, 159, 28, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, { 3, 1, 1, 3, 38, 40, }, - { 4, 1, 1, 3, 38, 62, }, + { 4, 1, 1, 3, 38, 54, }, { 5, 1, 1, 3, 38, 40, }, { 6, 1, 1, 3, 38, 52, }, { 7, 1, 1, 3, 38, 30, }, @@ -43501,7 +43501,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 46, 40, }, { 1, 1, 1, 3, 46, 50, }, { 3, 1, 1, 3, 46, 40, }, - { 4, 1, 1, 3, 46, 46, }, + { 4, 1, 1, 3, 46, 54, }, { 5, 1, 1, 3, 46, 40, }, { 6, 1, 1, 3, 46, 52, }, { 7, 1, 1, 3, 46, 30, }, @@ -43511,7 +43511,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 54, 40, }, { 1, 1, 1, 3, 54, 50, }, { 3, 1, 1, 3, 54, 40, }, - { 4, 1, 1, 3, 54, 62, }, + { 4, 1, 1, 3, 54, 66, }, { 5, 1, 1, 3, 54, 40, }, { 6, 1, 1, 3, 54, 68, }, { 7, 1, 1, 3, 54, 30, }, @@ -43521,17 +43521,17 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 62, 40, }, { 1, 1, 1, 3, 62, 48, }, { 3, 1, 1, 3, 62, 40, }, - { 4, 1, 1, 3, 62, 58, }, + { 4, 1, 1, 3, 62, 50, }, { 5, 1, 1, 3, 62, 40, }, { 6, 1, 1, 3, 62, 58, }, { 7, 1, 1, 3, 62, 30, }, { 8, 1, 1, 3, 62, 58, }, { 9, 1, 1, 3, 62, 40, }, - { 0, 1, 1, 3, 102, 54, }, + { 0, 1, 1, 3, 102, 56, }, { 2, 1, 1, 3, 102, 40, }, { 1, 1, 1, 3, 102, 70, }, - { 3, 1, 1, 3, 102, 54, }, - { 4, 1, 1, 3, 102, 64, }, + { 3, 1, 1, 3, 102, 56, }, + { 4, 1, 1, 3, 102, 54, }, { 5, 1, 1, 3, 102, 40, }, { 6, 1, 1, 3, 102, 54, }, { 7, 1, 1, 3, 102, 30, }, @@ -43541,7 +43541,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 110, 40, }, { 1, 1, 1, 3, 110, 70, }, { 3, 1, 1, 3, 110, 68, }, - { 4, 1, 1, 3, 110, 64, }, + { 4, 1, 1, 3, 110, 66, }, { 5, 1, 1, 3, 110, 40, }, { 6, 1, 1, 3, 110, 68, }, { 7, 1, 1, 3, 110, 30, }, @@ -43551,7 +43551,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 118, 40, }, { 1, 1, 1, 3, 118, 70, }, { 3, 1, 1, 3, 118, 127, }, - { 4, 1, 1, 3, 118, 64, }, + { 4, 1, 1, 3, 118, 66, }, { 5, 1, 1, 3, 118, 127, }, { 6, 1, 1, 3, 118, 68, }, { 7, 1, 1, 3, 118, 30, }, @@ -43561,7 +43561,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 126, 40, }, { 1, 1, 1, 3, 126, 70, }, { 3, 1, 1, 3, 126, 127, }, - { 4, 1, 1, 3, 126, 64, }, + { 4, 1, 1, 3, 126, 66, }, { 5, 1, 1, 3, 126, 127, }, { 6, 1, 1, 3, 126, 68, }, { 7, 1, 1, 3, 126, 30, }, @@ -43571,7 +43571,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 134, 40, }, { 1, 1, 1, 3, 134, 70, }, { 3, 1, 1, 3, 134, 68, }, - { 4, 1, 1, 3, 134, 64, }, + { 4, 1, 1, 3, 134, 66, }, { 5, 1, 1, 3, 134, 40, }, { 6, 1, 1, 3, 134, 68, }, { 7, 1, 1, 3, 134, 30, }, @@ -43579,16 +43579,16 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = 
{ { 9, 1, 1, 3, 134, 127, }, { 0, 1, 1, 3, 142, 68, }, { 2, 1, 1, 3, 142, 127, }, - { 1, 1, 1, 3, 142, 127, }, + { 1, 1, 1, 3, 142, 70, }, { 3, 1, 1, 3, 142, 68, }, - { 4, 1, 1, 3, 142, 64, }, + { 4, 1, 1, 3, 142, 66, }, { 5, 1, 1, 3, 142, 127, }, { 6, 1, 1, 3, 142, 68, }, { 7, 1, 1, 3, 142, 127, }, { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, { 0, 1, 1, 3, 151, 72, }, - { 2, 1, 1, 3, 151, -128, }, + { 2, 1, 1, 3, 151, 4, }, { 1, 1, 1, 3, 151, 127, }, { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, @@ -43596,9 +43596,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 3, 151, 72, }, { 7, 1, 1, 3, 151, 30, }, { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, -128, }, + { 9, 1, 1, 3, 151, 4, }, { 0, 1, 1, 3, 159, 72, }, - { 2, 1, 1, 3, 159, -128, }, + { 2, 1, 1, 3, 159, 4, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, @@ -43606,32 +43606,32 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 3, 159, 72, }, { 7, 1, 1, 3, 159, 30, }, { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, -128, }, - { 0, 1, 2, 4, 42, 64, }, + { 9, 1, 1, 3, 159, 4, }, + { 0, 1, 2, 4, 42, 68, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, { 3, 1, 2, 4, 42, 64, }, - { 4, 1, 2, 4, 42, 68, }, + { 4, 1, 2, 4, 42, 60, }, { 5, 1, 2, 4, 42, 64, }, { 6, 1, 2, 4, 42, 64, }, { 7, 1, 2, 4, 42, 54, }, { 8, 1, 2, 4, 42, 62, }, { 9, 1, 2, 4, 42, 64, }, - { 0, 1, 2, 4, 58, 62, }, + { 0, 1, 2, 4, 58, 60, }, { 2, 1, 2, 4, 58, 64, }, { 1, 1, 2, 4, 58, 64, }, - { 3, 1, 2, 4, 58, 62, }, - { 4, 1, 2, 4, 58, 64, }, + { 3, 1, 2, 4, 58, 60, }, + { 4, 1, 2, 4, 58, 56, }, { 5, 1, 2, 4, 58, 64, }, { 6, 1, 2, 4, 58, 62, }, { 7, 1, 2, 4, 58, 54, }, { 8, 1, 2, 4, 58, 62, }, { 9, 1, 2, 4, 58, 64, }, - { 0, 1, 2, 4, 106, 58, }, + { 0, 1, 2, 4, 106, 60, }, { 2, 1, 2, 4, 106, 64, }, { 1, 1, 2, 4, 106, 72, }, - { 3, 1, 2, 4, 106, 58, }, - { 4, 1, 2, 4, 106, 66, }, + { 3, 1, 2, 4, 106, 60, }, + { 4, 1, 2, 4, 106, 58, }, { 5, 1, 2, 4, 106, 64, }, { 6, 1, 2, 4, 106, 58, }, { 7, 1, 2, 4, 106, 54, }, @@ -43649,84 +43649,84 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 2, 4, 122, 127, }, { 0, 1, 2, 4, 138, 72, }, { 2, 1, 2, 4, 138, 127, }, - { 1, 1, 2, 4, 138, 127, }, + { 1, 1, 2, 4, 138, 72, }, { 3, 1, 2, 4, 138, 72, }, - { 4, 1, 2, 4, 138, 68, }, + { 4, 1, 2, 4, 138, 70, }, { 5, 1, 2, 4, 138, 127, }, { 6, 1, 2, 4, 138, 72, }, { 7, 1, 2, 4, 138, 127, }, { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, { 0, 1, 2, 4, 155, 72, }, - { 2, 1, 2, 4, 155, -128, }, + { 2, 1, 2, 4, 155, 28, }, { 1, 1, 2, 4, 155, 127, }, { 3, 1, 2, 4, 155, 72, }, - { 4, 1, 2, 4, 155, 68, }, + { 4, 1, 2, 4, 155, 62, }, { 5, 1, 2, 4, 155, 72, }, { 6, 1, 2, 4, 155, 72, }, { 7, 1, 2, 4, 155, 54, }, { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, -128, }, - { 0, 1, 2, 5, 42, 54, }, + { 9, 1, 2, 4, 155, 28, }, + { 0, 1, 2, 5, 42, 56, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, { 3, 1, 2, 5, 42, 40, }, - { 4, 1, 2, 5, 42, 58, }, + { 4, 1, 2, 5, 42, 50, }, { 5, 1, 2, 5, 42, 40, }, { 6, 1, 2, 5, 42, 52, }, { 7, 1, 2, 5, 42, 30, }, { 8, 1, 2, 5, 42, 50, }, { 9, 1, 2, 5, 42, 40, }, - { 0, 1, 2, 5, 58, 52, }, + { 0, 1, 2, 5, 58, 54, }, { 2, 1, 2, 5, 58, 40, }, { 1, 1, 2, 5, 58, 50, }, { 3, 1, 2, 5, 58, 40, }, - { 4, 1, 2, 5, 58, 56, }, + { 4, 1, 2, 5, 58, 46, }, { 5, 1, 2, 5, 58, 40, }, { 6, 1, 2, 5, 58, 52, }, { 7, 1, 2, 5, 58, 30, }, { 8, 1, 2, 5, 58, 52, }, { 9, 1, 2, 5, 58, 40, }, - { 0, 1, 2, 5, 106, 50, }, + { 0, 1, 2, 
5, 106, 48, }, { 2, 1, 2, 5, 106, 40, }, { 1, 1, 2, 5, 106, 72, }, - { 3, 1, 2, 5, 106, 50, }, - { 4, 1, 2, 5, 106, 56, }, + { 3, 1, 2, 5, 106, 48, }, + { 4, 1, 2, 5, 106, 50, }, { 5, 1, 2, 5, 106, 40, }, { 6, 1, 2, 5, 106, 50, }, { 7, 1, 2, 5, 106, 30, }, { 8, 1, 2, 5, 106, 50, }, { 9, 1, 2, 5, 106, 127, }, - { 0, 1, 2, 5, 122, 66, }, + { 0, 1, 2, 5, 122, 70, }, { 2, 1, 2, 5, 122, 40, }, { 1, 1, 2, 5, 122, 72, }, { 3, 1, 2, 5, 122, 127, }, - { 4, 1, 2, 5, 122, 56, }, + { 4, 1, 2, 5, 122, 62, }, { 5, 1, 2, 5, 122, 127, }, { 6, 1, 2, 5, 122, 66, }, { 7, 1, 2, 5, 122, 30, }, { 8, 1, 2, 5, 122, 66, }, { 9, 1, 2, 5, 122, 127, }, - { 0, 1, 2, 5, 138, 66, }, + { 0, 1, 2, 5, 138, 70, }, { 2, 1, 2, 5, 138, 127, }, - { 1, 1, 2, 5, 138, 127, }, - { 3, 1, 2, 5, 138, 66, }, - { 4, 1, 2, 5, 138, 58, }, + { 1, 1, 2, 5, 138, 72, }, + { 3, 1, 2, 5, 138, 70, }, + { 4, 1, 2, 5, 138, 62, }, { 5, 1, 2, 5, 138, 127, }, { 6, 1, 2, 5, 138, 66, }, { 7, 1, 2, 5, 138, 127, }, { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, - { 0, 1, 2, 5, 155, 62, }, - { 2, 1, 2, 5, 155, -128, }, + { 0, 1, 2, 5, 155, 72, }, + { 2, 1, 2, 5, 155, 4, }, { 1, 1, 2, 5, 155, 127, }, - { 3, 1, 2, 5, 155, 62, }, - { 4, 1, 2, 5, 155, 58, }, + { 3, 1, 2, 5, 155, 72, }, + { 4, 1, 2, 5, 155, 52, }, { 5, 1, 2, 5, 155, 72, }, { 6, 1, 2, 5, 155, 62, }, { 7, 1, 2, 5, 155, 30, }, { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, -128, }, + { 9, 1, 2, 5, 155, 4, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5); diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c index fdaa5a7260dd..a02904921729 100644 --- a/drivers/net/wireless/rsi/rsi_91x_ps.c +++ b/drivers/net/wireless/rsi/rsi_91x_ps.c @@ -16,7 +16,6 @@ #include <linux/etherdevice.h> #include <linux/if.h> -#include <linux/version.h> #include "rsi_debugfs.h" #include "rsi_mgmt.h" #include "rsi_common.h" diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index fe0287b22a25..e0c502bc4270 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev) } static const struct dev_pm_ops rsi_pm_ops = { .suspend = rsi_suspend, - .resume = rsi_resume, + .resume_noirq = rsi_resume, .freeze = rsi_freeze, .thaw = rsi_thaw, .restore = rsi_restore, diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h index c1cf19d1e376..30e03aa6a529 100644 --- a/drivers/net/wireless/rsi/rsi_boot_params.h +++ b/drivers/net/wireless/rsi/rsi_boot_params.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_coex.h b/drivers/net/wireless/rsi/rsi_coex.h index 0fdc67f37a56..2c14e4c651b9 100644 --- a/drivers/net/wireless/rsi/rsi_coex.h +++ b/drivers/net/wireless/rsi/rsi_coex.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2018 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index 60f1f286b030..7aa5124575cf 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h index 580ad3b3f710..a6a28640ad40 100644 --- a/drivers/net/wireless/rsi/rsi_debugfs.h +++ b/drivers/net/wireless/rsi/rsi_debugfs.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 46e36df9e8e3..d044a440fa08 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2017 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 73a19e43106b..a1065e5a92b4 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 2ce2dcf57441..236b21482f38 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_ps.h b/drivers/net/wireless/rsi/rsi_ps.h index 98ff6a4ced57..0be2f1e201e5 100644 --- a/drivers/net/wireless/rsi/rsi_ps.h +++ b/drivers/net/wireless/rsi/rsi_ps.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2017 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 1c756263cf15..7c91b126b350 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -1,4 +1,4 @@ -/** +/* * @section LICENSE * Copyright (c) 2014 Redpine Signals Inc. * diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 8702f434b569..254d19b66412 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -1,4 +1,4 @@ -/** +/* * @section LICENSE * Copyright (c) 2014 Redpine Signals Inc. * diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c index c364a3987618..8bade5d89f12 100644 --- a/drivers/net/wireless/st/cw1200/bh.c +++ b/drivers/net/wireless/st/cw1200/bh.c @@ -42,9 +42,6 @@ enum cw1200_bh_pm_state { CW1200_BH_RESUME, }; -typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv, - u8 *data, size_t size); - static void cw1200_bh_work(struct work_struct *work) { struct cw1200_common *priv = diff --git a/drivers/net/wireless/st/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h index 1ffa47994bb9..89fdc9115e9d 100644 --- a/drivers/net/wireless/st/cw1200/wsm.h +++ b/drivers/net/wireless/st/cw1200/wsm.h @@ -785,8 +785,6 @@ struct wsm_tx_confirm { }; /* 3.15 */ -typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv, - struct wsm_tx_confirm *arg); /* Note that ideology of wsm_tx struct is different against the rest of * WSM API. 
wsm_hdr is /not/ a caller-adapted struct to be used as an input @@ -862,9 +860,6 @@ struct wsm_rx { /* = sizeof(generic hi hdr) + sizeof(wsm hdr) */ #define WSM_RX_EXTRA_HEADROOM (16) -typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg, - struct sk_buff **skb_p); - /* 3.17 */ struct wsm_event { /* WSM_STATUS_... */ @@ -1180,8 +1175,6 @@ struct wsm_switch_channel { int wsm_switch_channel(struct cw1200_common *priv, const struct wsm_switch_channel *arg); -typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv); - #define WSM_START_REQ_ID 0x0017 #define WSM_START_RESP_ID 0x0417 @@ -1240,8 +1233,6 @@ int wsm_start_find(struct cw1200_common *priv); int wsm_stop_find(struct cw1200_common *priv); -typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status); - struct wsm_suspend_resume { /* See 3.52 */ /* Link ID */ @@ -1256,9 +1247,6 @@ struct wsm_suspend_resume { /* [out] */ int queue; }; -typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv, - struct wsm_suspend_resume *arg); - /* 3.54 Update-IE request. */ struct wsm_update_ie { /* WSM_UPDATE_IE_... */ diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index e14d88e558f0..85abd0a2d1c9 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl) unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ? wl->min_mr_fw_ver : wl->min_sr_fw_ver; char min_fw_str[32] = ""; + int off = 0; int i; /* the chip must be exactly equal */ @@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl) return 0; fail: - for (i = 0; i < NUM_FW_VER; i++) + for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++) if (min_ver[i] == WLCORE_FW_VER_IGNORE) - snprintf(min_fw_str, sizeof(min_fw_str), - "%s*.", min_fw_str); + off += snprintf(min_fw_str + off, + sizeof(min_fw_str) - off, + "*."); else - snprintf(min_fw_str, sizeof(min_fw_str), - "%s%u.", min_fw_str, min_ver[i]); + off += snprintf(min_fw_str + off, + sizeof(min_fw_str) - off, + "%u.", min_ver[i]); wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n" "Please use at least FW %s\n" diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h index b143293e694f..a9e13e6d65c5 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.h +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file, \ struct wl1271 *wl = file->private_data; \ struct struct_type *stats = wl->stats.fw_stats; \ char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \ + int pos = 0; \ int i; \ \ wl1271_debugfs_update_stats(wl); \ \ - for (i = 0; i < len; i++) \ - snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \ - buf, i, stats->sub.name[i]); \ + for (i = 0; i < len && pos < sizeof(buf); i++) \ + pos += snprintf(buf + pos, sizeof(buf) - pos, \ + "[%d] = %d\n", i, stats->sub.name[i]); \ \ return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \ } \ diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h index e98e04ee9a2c..91f276dd22a1 100644 --- a/drivers/net/wireless/wl3501.h +++ b/drivers/net/wireless/wl3501.h @@ -240,7 +240,7 @@ struct iw_mgmt_essid_pset { } __packed; /* - * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly + * According to 802.11 Wireless Networks, the definitive guide - O'Reilly * Pg 75 */ #define IW_DATA_RATE_MAX_LABELS 8 @@ -379,16 +379,7 @@ struct 
wl3501_get_confirm { u8 mib_value[100]; }; -struct wl3501_join_req { - u16 next_blk; - u8 sig_id; - u8 reserved; - struct iw_mgmt_data_rset operational_rset; - u16 reserved2; - u16 timeout; - u16 probe_delay; - u8 timestamp[8]; - u8 local_time[8]; +struct wl3501_req { u16 beacon_period; u16 dtim_period; u16 cap_info; @@ -401,6 +392,19 @@ struct wl3501_join_req { struct iw_mgmt_data_rset bss_basic_rset; }; +struct wl3501_join_req { + u16 next_blk; + u8 sig_id; + u8 reserved; + struct iw_mgmt_data_rset operational_rset; + u16 reserved2; + u16 timeout; + u16 probe_delay; + u8 timestamp[8]; + u8 local_time[8]; + struct wl3501_req req; +}; + struct wl3501_join_confirm { u16 next_blk; u8 sig_id; @@ -443,16 +447,7 @@ struct wl3501_scan_confirm { u16 status; char timestamp[8]; char localtime[8]; - u16 beacon_period; - u16 dtim_period; - u16 cap_info; - u8 bss_type; - u8 bssid[ETH_ALEN]; - struct iw_mgmt_essid_pset ssid; - struct iw_mgmt_ds_pset ds_pset; - struct iw_mgmt_cf_pset cf_pset; - struct iw_mgmt_ibss_pset ibss_pset; - struct iw_mgmt_data_rset bss_basic_rset; + struct wl3501_req req; u8 rssi; }; @@ -471,8 +466,10 @@ struct wl3501_md_req { u16 size; u8 pri; u8 service_class; - u8 daddr[ETH_ALEN]; - u8 saddr[ETH_ALEN]; + struct { + u8 daddr[ETH_ALEN]; + u8 saddr[ETH_ALEN]; + } addr; }; struct wl3501_md_ind { @@ -484,8 +481,10 @@ struct wl3501_md_ind { u8 reception; u8 pri; u8 service_class; - u8 daddr[ETH_ALEN]; - u8 saddr[ETH_ALEN]; + struct { + u8 daddr[ETH_ALEN]; + u8 saddr[ETH_ALEN]; + } addr; }; struct wl3501_md_confirm { diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 8ca5789c7b37..672f5d5f3f2c 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) struct wl3501_md_req sig = { .sig_id = WL3501_SIG_MD_REQ, }; + size_t sig_addr_len = sizeof(sig.addr); u8 *pdata = (char *)data; int rc = -EIO; @@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) goto out; } rc = 0; - memcpy(&sig.daddr[0], pdata, 12); - pktlen = len - 12; - pdata += 12; + memcpy(&sig.addr, pdata, sig_addr_len); + pktlen = len - sig_addr_len; + pdata += sig_addr_len; sig.data = bf; if (((*pdata) * 256 + (*(pdata + 1))) > 1500) { u8 addr4[ETH_ALEN] = { @@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas) struct wl3501_join_req sig = { .sig_id = WL3501_SIG_JOIN_REQ, .timeout = 10, - .ds_pset = { + .req.ds_pset = { .el = { .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET, .len = 1, @@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas) }, }; - memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72); + memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req)); return wl3501_esbq_exec(this, &sig, sizeof(sig)); } @@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr) if (sig.status == WL3501_STATUS_SUCCESS) { pr_debug("success"); if ((this->net_type == IW_MODE_INFRA && - (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || + (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || (this->net_type == IW_MODE_ADHOC && - (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || + (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || this->net_type == IW_MODE_AUTO) { if (!this->essid.el.len) matchflag = 1; else if (this->essid.el.len == 3 && !memcmp(this->essid.essid, "ANY", 3)) matchflag = 1; - else if (this->essid.el.len != sig.ssid.el.len) + else if 
(this->essid.el.len != sig.req.ssid.el.len) matchflag = 0; - else if (memcmp(this->essid.essid, sig.ssid.essid, + else if (memcmp(this->essid.essid, sig.req.ssid.essid, this->essid.el.len)) matchflag = 0; else matchflag = 1; if (matchflag) { for (i = 0; i < this->bss_cnt; i++) { - if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) { + if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid, + sig.req.bssid)) { matchflag = 0; break; } } } if (matchflag && (i < 20)) { - memcpy(&this->bss_set[i].beacon_period, - &sig.beacon_period, 73); + memcpy(&this->bss_set[i].req, + &sig.req, sizeof(sig.req)); this->bss_cnt++; this->rssi = sig.rssi; + this->bss_set[i].rssi = sig.rssi; } } } else if (sig.status == WL3501_STATUS_TIMEOUT) { @@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) if (this->join_sta_bss < this->bss_cnt) { const int i = this->join_sta_bss; memcpy(this->bssid, - this->bss_set[i].bssid, ETH_ALEN); - this->chan = this->bss_set[i].ds_pset.chan; + this->bss_set[i].req.bssid, ETH_ALEN); + this->chan = this->bss_set[i].req.ds_pset.chan; iw_copy_mgmt_info_element(&this->keep_essid.el, - &this->bss_set[i].ssid.el); + &this->bss_set[i].req.ssid.el); wl3501_mgmt_auth(this); } } else { const int i = this->join_sta_bss; - memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN); - this->chan = this->bss_set[i].ds_pset.chan; + memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN); + this->chan = this->bss_set[i].req.ds_pset.chan; iw_copy_mgmt_info_element(&this->keep_essid.el, - &this->bss_set[i].ssid.el); + &this->bss_set[i].req.ssid.el); wl3501_online(dev); } } else { @@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, } else { skb->dev = dev; skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ - skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12); + skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr, + sizeof(sig.addr)); wl3501_receive(this, skb->data, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, dev); @@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info, for (i = 0; i < this->bss_cnt; ++i) { iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; - memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN); + memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN); current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_ADDR_LEN); iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; - iwe.u.data.length = this->bss_set[i].ssid.el.len; + iwe.u.data.length = this->bss_set[i].req.ssid.el.len; current_ev = iwe_stream_add_point(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, - this->bss_set[i].ssid.essid); + this->bss_set[i].req.ssid.essid); iwe.cmd = SIOCGIWMODE; - iwe.u.mode = this->bss_set[i].bss_type; + iwe.u.mode = this->bss_set[i].req.bss_type; current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN); iwe.cmd = SIOCGIWFREQ; - iwe.u.freq.m = this->bss_set[i].ds_pset.chan; + iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan; iwe.u.freq.e = 0; current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN); iwe.cmd = SIOCGIWENCODE; - if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) + if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; diff --git 
a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig new file mode 100644 index 000000000000..7ad1920120bc --- /dev/null +++ b/drivers/net/wwan/Kconfig @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Wireless WAN device configuration +# + +menuconfig WWAN + bool "Wireless WAN" + help + This section contains Wireless WAN configuration for WWAN framework + and drivers. + +if WWAN + +config WWAN_CORE + tristate "WWAN Driver Core" + help + Say Y here if you want to use the WWAN driver core. This driver + provides a common framework for WWAN drivers. + + To compile this driver as a module, choose M here: the module will be + called wwan. + +config MHI_WWAN_CTRL + tristate "MHI WWAN control driver for QCOM-based PCIe modems" + select WWAN_CORE + depends on MHI_BUS + help + MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem + control protocols/ports to userspace, including AT, MBIM, QMI, DIAG + and FIREHOSE. These protocols can be accessed directly from userspace + (e.g. AT commands) or via libraries/tools (e.g. libmbim, libqmi, + libqcdm...). + + To compile this driver as a module, choose M here: the module will be + called mhi_wwan_ctrl. + +endif # WWAN diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile new file mode 100644 index 000000000000..556cd90958ca --- /dev/null +++ b/drivers/net/wwan/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux WWAN device drivers. +# + +obj-$(CONFIG_WWAN_CORE) += wwan.o +wwan-objs += wwan_core.o + +obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c new file mode 100644 index 000000000000..1bc6b69aa530 --- /dev/null +++ b/drivers/net/wwan/mhi_wwan_ctrl.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ +#include <linux/kernel.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/wwan.h> + +/* MHI wwan flags */ +enum mhi_wwan_flags { + MHI_WWAN_DL_CAP, + MHI_WWAN_UL_CAP, + MHI_WWAN_RX_REFILL, +}; + +#define MHI_WWAN_MAX_MTU 0x8000 + +struct mhi_wwan_dev { + /* Lower level is a mhi dev, upper level is a wwan port */ + struct mhi_device *mhi_dev; + struct wwan_port *wwan_port; + + /* State and capabilities */ + unsigned long flags; + size_t mtu; + + /* Protect against concurrent TX and TX-completion (bh) */ + spinlock_t tx_lock; + + /* Protect RX budget and rx_refill scheduling */ + spinlock_t rx_lock; + struct work_struct rx_refill; + + /* RX budget is initially set to the size of the MHI RX queue and is + * used to limit the number of allocated and queued packets. It is + * decremented on data queueing and incremented on data release. 
+ */ + unsigned int rx_budget; +}; + +/* Increment RX budget and schedule RX refill if necessary */ +static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan) +{ + spin_lock(&mhiwwan->rx_lock); + + mhiwwan->rx_budget++; + + if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags)) + schedule_work(&mhiwwan->rx_refill); + + spin_unlock(&mhiwwan->rx_lock); +} + +/* Decrement RX budget if non-zero and return true on success */ +static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan) +{ + bool ret = false; + + spin_lock(&mhiwwan->rx_lock); + + if (mhiwwan->rx_budget) { + mhiwwan->rx_budget--; + if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags)) + ret = true; + } + + spin_unlock(&mhiwwan->rx_lock); + + return ret; +} + +static void __mhi_skb_destructor(struct sk_buff *skb) +{ + /* RX buffer has been consumed, increase the allowed budget */ + mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg); +} + +static void mhi_wwan_ctrl_refill_work(struct work_struct *work) +{ + struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill); + struct mhi_device *mhi_dev = mhiwwan->mhi_dev; + + while (mhi_wwan_rx_budget_dec(mhiwwan)) { + struct sk_buff *skb; + + skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL); + if (!skb) { + mhi_wwan_rx_budget_inc(mhiwwan); + break; + } + + /* To prevent unlimited buffer allocation if nothing consumes + * the RX buffers (passed to WWAN core), track their lifespan + * to not allocate more than allowed budget. + */ + skb->destructor = __mhi_skb_destructor; + skb_shinfo(skb)->destructor_arg = mhiwwan; + + if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) { + dev_err(&mhi_dev->dev, "Failed to queue buffer\n"); + kfree_skb(skb); + break; + } + } +} + +static int mhi_wwan_ctrl_start(struct wwan_port *port) +{ + struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); + int ret; + + /* Start mhi device's channel(s) */ + ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); + if (ret) + return ret; + + /* Don't allocate more buffers than MHI channel queue size */ + mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE); + + /* Add buffers to the MHI inbound queue */ + if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) { + set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags); + mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill); + } + + return 0; +} + +static void mhi_wwan_ctrl_stop(struct wwan_port *port) +{ + struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); + + spin_lock(&mhiwwan->rx_lock); + clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags); + spin_unlock(&mhiwwan->rx_lock); + + cancel_work_sync(&mhiwwan->rx_refill); + + mhi_unprepare_from_transfer(mhiwwan->mhi_dev); +} + +static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); + int ret; + + if (skb->len > mhiwwan->mtu) + return -EMSGSIZE; + + if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags)) + return -EOPNOTSUPP; + + /* Queue the packet for MHI transfer and check fullness of the queue */ + spin_lock_bh(&mhiwwan->tx_lock); + ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); + if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE)) + wwan_port_txoff(port); + spin_unlock_bh(&mhiwwan->tx_lock); + + return ret; +} + +static const struct wwan_port_ops wwan_pops = { + .start = mhi_wwan_ctrl_start, + .stop = mhi_wwan_ctrl_stop, + .tx = mhi_wwan_ctrl_tx, +}; + +static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct 
mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev); + struct wwan_port *port = mhiwwan->wwan_port; + struct sk_buff *skb = mhi_result->buf_addr; + + dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__, + mhi_result->transaction_status, mhi_result->bytes_xferd); + + /* MHI core has done with the buffer, release it */ + consume_skb(skb); + + /* There is likely new slot available in the MHI queue, re-allow TX */ + spin_lock_bh(&mhiwwan->tx_lock); + if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE)) + wwan_port_txon(port); + spin_unlock_bh(&mhiwwan->tx_lock); +} + +static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev); + struct wwan_port *port = mhiwwan->wwan_port; + struct sk_buff *skb = mhi_result->buf_addr; + + dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__, + mhi_result->transaction_status, mhi_result->bytes_xferd); + + if (mhi_result->transaction_status && + mhi_result->transaction_status != -EOVERFLOW) { + kfree_skb(skb); + return; + } + + /* MHI core does not update skb->len, do it before forward */ + skb_put(skb, mhi_result->bytes_xferd); + wwan_port_rx(port, skb); + + /* Do not increment rx budget nor refill RX buffers now, wait for the + * buffer to be consumed. Done from __mhi_skb_destructor(). + */ +} + +static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + struct mhi_wwan_dev *mhiwwan; + struct wwan_port *port; + + mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL); + if (!mhiwwan) + return -ENOMEM; + + mhiwwan->mhi_dev = mhi_dev; + mhiwwan->mtu = MHI_WWAN_MAX_MTU; + INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work); + spin_lock_init(&mhiwwan->tx_lock); + spin_lock_init(&mhiwwan->rx_lock); + + if (mhi_dev->dl_chan) + set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags); + if (mhi_dev->ul_chan) + set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags); + + dev_set_drvdata(&mhi_dev->dev, mhiwwan); + + /* Register as a wwan port, id->driver_data contains wwan port type */ + port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data, + &wwan_pops, mhiwwan); + if (IS_ERR(port)) { + kfree(mhiwwan); + return PTR_ERR(port); + } + + mhiwwan->wwan_port = port; + + return 0; +}; + +static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev) +{ + struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev); + + wwan_remove_port(mhiwwan->wwan_port); + kfree(mhiwwan); +} + +static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = { + { .chan = "DUN", .driver_data = WWAN_PORT_AT }, + { .chan = "MBIM", .driver_data = WWAN_PORT_MBIM }, + { .chan = "QMI", .driver_data = WWAN_PORT_QMI }, + { .chan = "DIAG", .driver_data = WWAN_PORT_QCDM }, + { .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE }, + {}, +}; +MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table); + +static struct mhi_driver mhi_wwan_ctrl_driver = { + .id_table = mhi_wwan_ctrl_match_table, + .remove = mhi_wwan_ctrl_remove, + .probe = mhi_wwan_ctrl_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = "mhi_wwan_ctrl", + }, +}; + +module_mhi_driver(mhi_wwan_ctrl_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI WWAN CTRL Driver"); +MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c new file mode 100644 index 000000000000..cff04e532c1e --- /dev/null +++ 
b/drivers/net/wwan/wwan_core.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/wwan.h> + +#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */ + +static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */ +static DEFINE_IDA(minors); /* minors for WWAN port chardevs */ +static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */ +static struct class *wwan_class; +static int wwan_major; + +#define to_wwan_dev(d) container_of(d, struct wwan_device, dev) +#define to_wwan_port(d) container_of(d, struct wwan_port, dev) + +/* WWAN port flags */ +#define WWAN_PORT_TX_OFF 0 + +/** + * struct wwan_device - The structure that defines a WWAN device + * + * @id: WWAN device unique ID. + * @dev: Underlying device. + * @port_id: Current available port ID to pick. + */ +struct wwan_device { + unsigned int id; + struct device dev; + atomic_t port_id; +}; + +/** + * struct wwan_port - The structure that defines a WWAN port + * @type: Port type + * @start_count: Port start counter + * @flags: Store port state and capabilities + * @ops: Pointer to WWAN port operations + * @ops_lock: Protect port ops + * @dev: Underlying device + * @rxq: Buffer inbound queue + * @waitqueue: The waitqueue for port fops (read/write/poll) + */ +struct wwan_port { + enum wwan_port_type type; + unsigned int start_count; + unsigned long flags; + const struct wwan_port_ops *ops; + struct mutex ops_lock; /* Serialize ops + protect against removal */ + struct device dev; + struct sk_buff_head rxq; + wait_queue_head_t waitqueue; +}; + +static void wwan_dev_destroy(struct device *dev) +{ + struct wwan_device *wwandev = to_wwan_dev(dev); + + ida_free(&wwan_dev_ids, wwandev->id); + kfree(wwandev); +} + +static const struct device_type wwan_dev_type = { + .name = "wwan_dev", + .release = wwan_dev_destroy, +}; + +static int wwan_dev_parent_match(struct device *dev, const void *parent) +{ + return (dev->type == &wwan_dev_type && dev->parent == parent); +} + +static struct wwan_device *wwan_dev_get_by_parent(struct device *parent) +{ + struct device *dev; + + dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match); + if (!dev) + return ERR_PTR(-ENODEV); + + return to_wwan_dev(dev); +} + +/* This function allocates and registers a new WWAN device OR if a WWAN device + * already exist for the given parent, it gets a reference and return it. + * This function is not exported (for now), it is called indirectly via + * wwan_create_port(). + */ +static struct wwan_device *wwan_create_dev(struct device *parent) +{ + struct wwan_device *wwandev; + int err, id; + + /* The 'find-alloc-register' operation must be protected against + * concurrent execution, a WWAN device is possibly shared between + * multiple callers or concurrently unregistered from wwan_remove_dev(). 
+ */ + mutex_lock(&wwan_register_lock); + + /* If wwandev already exists, return it */ + wwandev = wwan_dev_get_by_parent(parent); + if (!IS_ERR(wwandev)) + goto done_unlock; + + id = ida_alloc(&wwan_dev_ids, GFP_KERNEL); + if (id < 0) + goto done_unlock; + + wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL); + if (!wwandev) { + ida_free(&wwan_dev_ids, id); + goto done_unlock; + } + + wwandev->dev.parent = parent; + wwandev->dev.class = wwan_class; + wwandev->dev.type = &wwan_dev_type; + wwandev->id = id; + dev_set_name(&wwandev->dev, "wwan%d", wwandev->id); + + err = device_register(&wwandev->dev); + if (err) { + put_device(&wwandev->dev); + wwandev = NULL; + } + +done_unlock: + mutex_unlock(&wwan_register_lock); + + return wwandev; +} + +static int is_wwan_child(struct device *dev, void *data) +{ + return dev->class == wwan_class; +} + +static void wwan_remove_dev(struct wwan_device *wwandev) +{ + int ret; + + /* Prevent concurrent picking from wwan_create_dev */ + mutex_lock(&wwan_register_lock); + + /* WWAN device is created and registered (get+add) along with its first + * child port, and subsequent port registrations only grab a reference + * (get). The WWAN device must then be unregistered (del+put) along with + * its latest port, and reference simply dropped (put) otherwise. + */ + ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child); + if (!ret) + device_unregister(&wwandev->dev); + else + put_device(&wwandev->dev); + + mutex_unlock(&wwan_register_lock); +} + +/* ------- WWAN port management ------- */ + +static void wwan_port_destroy(struct device *dev) +{ + struct wwan_port *port = to_wwan_port(dev); + + ida_free(&minors, MINOR(port->dev.devt)); + skb_queue_purge(&port->rxq); + mutex_destroy(&port->ops_lock); + kfree(port); +} + +static const struct device_type wwan_port_dev_type = { + .name = "wwan_port", + .release = wwan_port_destroy, +}; + +static int wwan_port_minor_match(struct device *dev, const void *minor) +{ + return (dev->type == &wwan_port_dev_type && + MINOR(dev->devt) == *(unsigned int *)minor); +} + +static struct wwan_port *wwan_port_get_by_minor(unsigned int minor) +{ + struct device *dev; + + dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match); + if (!dev) + return ERR_PTR(-ENODEV); + + return to_wwan_port(dev); +} + +/* Keep aligned with wwan_port_type enum */ +static const char * const wwan_port_type_str[] = { + "AT", + "MBIM", + "QMI", + "QCDM", + "FIREHOSE" +}; + +struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, + void *drvdata) +{ + struct wwan_device *wwandev; + struct wwan_port *port; + int minor, err = -ENOMEM; + + if (type >= WWAN_PORT_MAX || !ops) + return ERR_PTR(-EINVAL); + + /* A port is always a child of a WWAN device, retrieve (allocate or + * pick) the WWAN device based on the provided parent device. 
+ */ + wwandev = wwan_create_dev(parent); + if (IS_ERR(wwandev)) + return ERR_CAST(wwandev); + + /* A port is exposed as character device, get a minor */ + minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL); + if (minor < 0) + goto error_wwandev_remove; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) { + ida_free(&minors, minor); + goto error_wwandev_remove; + } + + port->type = type; + port->ops = ops; + mutex_init(&port->ops_lock); + skb_queue_head_init(&port->rxq); + init_waitqueue_head(&port->waitqueue); + + port->dev.parent = &wwandev->dev; + port->dev.class = wwan_class; + port->dev.type = &wwan_port_dev_type; + port->dev.devt = MKDEV(wwan_major, minor); + dev_set_drvdata(&port->dev, drvdata); + + /* create unique name based on wwan device id, port index and type */ + dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id, + atomic_inc_return(&wwandev->port_id), + wwan_port_type_str[port->type]); + + err = device_register(&port->dev); + if (err) + goto error_put_device; + + return port; + +error_put_device: + put_device(&port->dev); +error_wwandev_remove: + wwan_remove_dev(wwandev); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(wwan_create_port); + +void wwan_remove_port(struct wwan_port *port) +{ + struct wwan_device *wwandev = to_wwan_dev(port->dev.parent); + + mutex_lock(&port->ops_lock); + if (port->start_count) + port->ops->stop(port); + port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */ + mutex_unlock(&port->ops_lock); + + wake_up_interruptible(&port->waitqueue); + + skb_queue_purge(&port->rxq); + dev_set_drvdata(&port->dev, NULL); + device_unregister(&port->dev); + + /* Release related wwan device */ + wwan_remove_dev(wwandev); +} +EXPORT_SYMBOL_GPL(wwan_remove_port); + +void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb) +{ + skb_queue_tail(&port->rxq, skb); + wake_up_interruptible(&port->waitqueue); +} +EXPORT_SYMBOL_GPL(wwan_port_rx); + +void wwan_port_txon(struct wwan_port *port) +{ + clear_bit(WWAN_PORT_TX_OFF, &port->flags); + wake_up_interruptible(&port->waitqueue); +} +EXPORT_SYMBOL_GPL(wwan_port_txon); + +void wwan_port_txoff(struct wwan_port *port) +{ + set_bit(WWAN_PORT_TX_OFF, &port->flags); +} +EXPORT_SYMBOL_GPL(wwan_port_txoff); + +void *wwan_port_get_drvdata(struct wwan_port *port) +{ + return dev_get_drvdata(&port->dev); +} +EXPORT_SYMBOL_GPL(wwan_port_get_drvdata); + +static int wwan_port_op_start(struct wwan_port *port) +{ + int ret = 0; + + mutex_lock(&port->ops_lock); + if (!port->ops) { /* Port got unplugged */ + ret = -ENODEV; + goto out_unlock; + } + + /* If port is already started, don't start again */ + if (!port->start_count) + ret = port->ops->start(port); + + if (!ret) + port->start_count++; + +out_unlock: + mutex_unlock(&port->ops_lock); + + return ret; +} + +static void wwan_port_op_stop(struct wwan_port *port) +{ + mutex_lock(&port->ops_lock); + port->start_count--; + if (port->ops && !port->start_count) + port->ops->stop(port); + mutex_unlock(&port->ops_lock); +} + +static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb) +{ + int ret; + + mutex_lock(&port->ops_lock); + if (!port->ops) { /* Port got unplugged */ + ret = -ENODEV; + goto out_unlock; + } + + ret = port->ops->tx(port, skb); + +out_unlock: + mutex_unlock(&port->ops_lock); + + return ret; +} + +static bool is_read_blocked(struct wwan_port *port) +{ + return skb_queue_empty(&port->rxq) && port->ops; +} + +static bool is_write_blocked(struct wwan_port *port) +{ + return test_bit(WWAN_PORT_TX_OFF, &port->flags) 
&& port->ops; +} + +static int wwan_wait_rx(struct wwan_port *port, bool nonblock) +{ + if (!is_read_blocked(port)) + return 0; + + if (nonblock) + return -EAGAIN; + + if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port))) + return -ERESTARTSYS; + + return 0; +} + +static int wwan_wait_tx(struct wwan_port *port, bool nonblock) +{ + if (!is_write_blocked(port)) + return 0; + + if (nonblock) + return -EAGAIN; + + if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port))) + return -ERESTARTSYS; + + return 0; +} + +static int wwan_port_fops_open(struct inode *inode, struct file *file) +{ + struct wwan_port *port; + int err = 0; + + port = wwan_port_get_by_minor(iminor(inode)); + if (IS_ERR(port)) + return PTR_ERR(port); + + file->private_data = port; + stream_open(inode, file); + + err = wwan_port_op_start(port); + if (err) + put_device(&port->dev); + + return err; +} + +static int wwan_port_fops_release(struct inode *inode, struct file *filp) +{ + struct wwan_port *port = filp->private_data; + + wwan_port_op_stop(port); + put_device(&port->dev); + + return 0; +} + +static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct wwan_port *port = filp->private_data; + struct sk_buff *skb; + size_t copied; + int ret; + + ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK)); + if (ret) + return ret; + + skb = skb_dequeue(&port->rxq); + if (!skb) + return -EIO; + + copied = min_t(size_t, count, skb->len); + if (copy_to_user(buf, skb->data, copied)) { + kfree_skb(skb); + return -EFAULT; + } + skb_pull(skb, copied); + + /* skb is not fully consumed, keep it in the queue */ + if (skb->len) + skb_queue_head(&port->rxq, skb); + else + consume_skb(skb); + + return copied; +} + +static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf, + size_t count, loff_t *offp) +{ + struct wwan_port *port = filp->private_data; + struct sk_buff *skb; + int ret; + + ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK)); + if (ret) + return ret; + + skb = alloc_skb(count, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + if (copy_from_user(skb_put(skb, count), buf, count)) { + kfree_skb(skb); + return -EFAULT; + } + + ret = wwan_port_op_tx(port, skb); + if (ret) { + kfree_skb(skb); + return ret; + } + + return count; +} + +static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait) +{ + struct wwan_port *port = filp->private_data; + __poll_t mask = 0; + + poll_wait(filp, &port->waitqueue, wait); + + if (!is_write_blocked(port)) + mask |= EPOLLOUT | EPOLLWRNORM; + if (!is_read_blocked(port)) + mask |= EPOLLIN | EPOLLRDNORM; + if (!port->ops) + mask |= EPOLLHUP | EPOLLERR; + + return mask; +} + +static const struct file_operations wwan_port_fops = { + .owner = THIS_MODULE, + .open = wwan_port_fops_open, + .release = wwan_port_fops_release, + .read = wwan_port_fops_read, + .write = wwan_port_fops_write, + .poll = wwan_port_fops_poll, + .llseek = noop_llseek, +}; + +static int __init wwan_init(void) +{ + wwan_class = class_create(THIS_MODULE, "wwan"); + if (IS_ERR(wwan_class)) + return PTR_ERR(wwan_class); + + /* chrdev used for wwan ports */ + wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops); + if (wwan_major < 0) { + class_destroy(wwan_class); + return wwan_major; + } + + return 0; +} + +static void __exit wwan_exit(void) +{ + unregister_chrdev(wwan_major, "wwan_port"); + class_destroy(wwan_class); +} + +module_init(wwan_init); +module_exit(wwan_exit); + +MODULE_AUTHOR("Loic Poulain 
<loic.poulain@linaro.org>"); +MODULE_DESCRIPTION("WWAN core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cc19cd9203da..44275908d61a 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -608,8 +608,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n, struct netfront_info *np = netdev_priv(dev); struct netfront_queue *queue = NULL; unsigned long irq_flags; - int drops = 0; - int i, err; + int nxmit = 0; + int i; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -622,15 +622,13 @@ static int xennet_xdp_xmit(struct net_device *dev, int n, if (!xdpf) continue; - err = xennet_xdp_xmit_one(dev, queue, xdpf); - if (err) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (xennet_xdp_xmit_one(dev, queue, xdpf)) + break; + nxmit++; } spin_unlock_irqrestore(&queue->tx_lock, irq_flags); - return n - drops; + return nxmit; } @@ -875,7 +873,9 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, get_page(pdata); xdpf = xdp_convert_buff_to_frame(xdp); err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); - if (unlikely(err < 0)) + if (unlikely(!err)) + xdp_return_frame_rx_napi(xdpf); + else if (unlikely(err < 0)) trace_xdp_exception(queue->info->netdev, prog, act); break; case XDP_REDIRECT: diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 4dc7bd7e02b6..fe0719ed81a0 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -176,7 +176,7 @@ static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev, * * The firmware will be analyzed and applied when we send NCI_OP_PROP_PATCH_CMD * command with NCI_PATCH_TYPE_EOT parameter. The device will send a - * NFCC_PATCH_NTF packaet and a NCI_OP_CORE_RESET_NTF packet. + * NFCC_PATCH_NTF packet and a NCI_OP_CORE_RESET_NTF packet. 
*/ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) { @@ -236,15 +236,12 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) static int fdp_nci_open(struct nci_dev *ndev) { - int r; struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; dev_dbg(dev, "%s\n", __func__); - r = info->phy_ops->enable(info->phy); - - return r; + return info->phy_ops->enable(info->phy); } static int fdp_nci_close(struct nci_dev *ndev) @@ -347,7 +344,7 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev) int r = 0; if (info->otp_version >= info->otp_patch_version) - goto out; + return r; info->setup_patch_sent = 0; info->setup_reset_ntf = 0; @@ -356,19 +353,17 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev) /* Patch init request */ r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_OTP); if (r) - goto out; + return r; /* Patch data connection creation */ conn_id = fdp_nci_create_conn(ndev); - if (conn_id < 0) { - r = conn_id; - goto out; - } + if (conn_id < 0) + return conn_id; /* Send the patch over the data connection */ r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_OTP); if (r) - goto out; + return r; /* Wait for all the packets to be send over i2c */ wait_event_interruptible(info->setup_wq, @@ -380,13 +375,12 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev) /* Close the data connection */ r = nci_core_conn_close(info->ndev, conn_id); if (r) - goto out; + return r; /* Patch finish message */ if (fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT)) { nfc_err(dev, "OTP patch error 0x%x\n", r); - r = -EINVAL; - goto out; + return -EINVAL; } /* If the patch notification didn't arrive yet, wait for it */ @@ -396,8 +390,7 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev) r = info->setup_patch_status; if (r) { nfc_err(dev, "OTP patch error 0x%x\n", r); - r = -EINVAL; - goto out; + return -EINVAL; } /* @@ -406,7 +399,6 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev) */ wait_event_interruptible(info->setup_wq, info->setup_reset_ntf); -out: return r; } @@ -418,7 +410,7 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev) int r = 0; if (info->ram_version >= info->ram_patch_version) - goto out; + return r; info->setup_patch_sent = 0; info->setup_reset_ntf = 0; @@ -427,19 +419,17 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev) /* Patch init request */ r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_RAM); if (r) - goto out; + return r; /* Patch data connection creation */ conn_id = fdp_nci_create_conn(ndev); - if (conn_id < 0) { - r = conn_id; - goto out; - } + if (conn_id < 0) + return conn_id; /* Send the patch over the data connection */ r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_RAM); if (r) - goto out; + return r; /* Wait for all the packets to be send over i2c */ wait_event_interruptible(info->setup_wq, @@ -451,13 +441,12 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev) /* Close the data connection */ r = nci_core_conn_close(info->ndev, conn_id); if (r) - goto out; + return r; /* Patch finish message */ if (fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT)) { nfc_err(dev, "RAM patch error 0x%x\n", r); - r = -EINVAL; - goto out; + return -EINVAL; } /* If the patch notification didn't arrive yet, wait for it */ @@ -467,8 +456,7 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev) r = info->setup_patch_status; if (r) { nfc_err(dev, "RAM patch error 0x%x\n", r); - r = -EINVAL; - goto out; + return -EINVAL; } /* @@ -477,7 +465,6 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev) */ 
wait_event_interruptible(info->setup_wq, info->setup_reset_ntf); -out: return r; } diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 0207e66cee21..795da9b85d56 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -40,11 +40,8 @@ static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags) struct i2c_client *client = phy->i2c_dev; static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 6.2.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ - int rc; - - rc = i2c_master_send(client, ack, 6); - return rc; + return i2c_master_send(client, ack, 6); } static int pn533_i2c_send_frame(struct pn533 *dev, @@ -199,8 +196,7 @@ static int pn533_i2c_probe(struct i2c_client *client, &phy->i2c_dev->dev); if (IS_ERR(priv)) { - r = PTR_ERR(priv); - return r; + return PTR_ERR(priv); } phy->priv = priv; diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index f1469ac8ff42..2c7f9916f206 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -489,12 +489,8 @@ static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { - int rc; - - rc = __pn533_send_async(dev, cmd_code, req, complete_cb, + return __pn533_send_async(dev, cmd_code, req, complete_cb, complete_cb_context); - - return rc; } static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code, @@ -502,12 +498,8 @@ static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { - int rc; - - rc = __pn533_send_async(dev, cmd_code, req, complete_cb, + return __pn533_send_async(dev, cmd_code, req, complete_cb, complete_cb_context); - - return rc; } /* @@ -706,6 +698,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0) return false; + if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE) + return false; + return true; } @@ -2617,7 +2612,7 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf) return rc; } - return rc; + return 0; } static int pn532_sam_configuration(struct nfc_dev *nfc_dev) @@ -2791,7 +2786,6 @@ struct pn533 *pn53x_common_init(u32 device_type, struct device *dev) { struct pn533 *priv; - int rc = -ENOMEM; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -2833,7 +2827,7 @@ struct pn533 *pn53x_common_init(u32 device_type, error: kfree(priv); - return ERR_PTR(rc); + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(pn53x_common_init); diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index c00b7a07c3ee..865d3e3d1528 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -124,13 +124,12 @@ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev) if (s3fwrn5_firmware_init(info)) { //skip bootloader mode - ret = 0; - goto out; + return 0; } ret = s3fwrn5_firmware_update(info); if (ret < 0) - goto out; + return ret; /* NCI core reset */ @@ -139,12 +138,9 @@ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev) ret = nci_core_reset(info->ndev); if (ret < 0) - goto out; - - ret = nci_core_init(info->ndev); + return ret; -out: - return ret; + return nci_core_init(info->ndev); } static struct nci_ops s3fwrn5_nci_ops = { diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 8db323adebf0..09df6ea65840 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -95,17 +95,14 @@ static int st_nci_spi_write(void *phy_id, struct sk_buff *skb) */ if (!r) { skb_rx = alloc_skb(skb->len, 
GFP_KERNEL); - if (!skb_rx) { - r = -ENOMEM; - goto exit; - } + if (!skb_rx) + return -ENOMEM; skb_put(skb_rx, skb->len); memcpy(skb_rx->data, buf, skb->len); ndlc_recv(phy->ndlc, skb_rx); } -exit: return r; } diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index bc0a27de69d4..dbac3a172a11 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c @@ -11,6 +11,7 @@ #include <linux/phy.h> #include <linux/export.h> #include <linux/device.h> +#include <linux/nvmem-consumer.h> /** * of_get_phy_mode - Get phy mode for given device_node @@ -45,42 +46,59 @@ int of_get_phy_mode(struct device_node *np, phy_interface_t *interface) } EXPORT_SYMBOL_GPL(of_get_phy_mode); -static const void *of_get_mac_addr(struct device_node *np, const char *name) +static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr) { struct property *pp = of_find_property(np, name, NULL); - if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) - return pp->value; - return NULL; + if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) { + memcpy(addr, pp->value, ETH_ALEN); + return 0; + } + return -ENODEV; } -static const void *of_get_mac_addr_nvmem(struct device_node *np) +static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) { - int ret; - const void *mac; - u8 nvmem_mac[ETH_ALEN]; struct platform_device *pdev = of_find_device_by_node(np); + struct nvmem_cell *cell; + const void *mac; + size_t len; + int ret; - if (!pdev) - return ERR_PTR(-ENODEV); - - ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac); - if (ret) { + /* Try lookup by device first, there might be a nvmem_cell_lookup + * associated with a given device. + */ + if (pdev) { + ret = nvmem_get_mac_address(&pdev->dev, addr); put_device(&pdev->dev); - return ERR_PTR(ret); + return ret; } - mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL); - put_device(&pdev->dev); - if (!mac) - return ERR_PTR(-ENOMEM); + cell = of_nvmem_cell_get(np, "mac-address"); + if (IS_ERR(cell)) + return PTR_ERR(cell); - return mac; + mac = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + + if (IS_ERR(mac)) + return PTR_ERR(mac); + + if (len != ETH_ALEN || !is_valid_ether_addr(mac)) { + kfree(mac); + return -EINVAL; + } + + memcpy(addr, mac, ETH_ALEN); + kfree(mac); + + return 0; } /** * of_get_mac_address() * @np: Caller's Device Node + * @addr: Pointer to a six-byte array for the result * * Search the device tree for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain to "most recent" MAC @@ -101,24 +119,27 @@ static const void *of_get_mac_addr_nvmem(struct device_node *np) * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists * but is all zeros. * - * Return: Will be a valid pointer on success and ERR_PTR in case of error. + * Return: 0 on success and errno in case of error. 
*/ -const void *of_get_mac_address(struct device_node *np) +int of_get_mac_address(struct device_node *np, u8 *addr) { - const void *addr; + int ret; + + if (!np) + return -ENODEV; - addr = of_get_mac_addr(np, "mac-address"); - if (addr) - return addr; + ret = of_get_mac_addr(np, "mac-address", addr); + if (!ret) + return 0; - addr = of_get_mac_addr(np, "local-mac-address"); - if (addr) - return addr; + ret = of_get_mac_addr(np, "local-mac-address", addr); + if (!ret) + return 0; - addr = of_get_mac_addr(np, "address"); - if (addr) - return addr; + ret = of_get_mac_addr(np, "address", addr); + if (!ret) + return 0; - return of_get_mac_addr_nvmem(np); + return of_get_mac_addr_nvmem(np, addr); } EXPORT_SYMBOL(of_get_mac_address); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 4afd4ee4f7f0..afc06e6ce115 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -31,6 +31,7 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id) return (dev->devfn + dev->sriov->offset + dev->sriov->stride * vf_id) & 0xff; } +EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn); /* * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may @@ -157,6 +158,92 @@ failed: return rc; } +#ifdef CONFIG_PCI_MSI +static ssize_t sriov_vf_total_msix_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + u32 vf_total_msix = 0; + + device_lock(dev); + if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix) + goto unlock; + + vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev); +unlock: + device_unlock(dev); + return sysfs_emit(buf, "%u\n", vf_total_msix); +} +static DEVICE_ATTR_RO(sriov_vf_total_msix); + +static ssize_t sriov_vf_msix_count_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *vf_dev = to_pci_dev(dev); + struct pci_dev *pdev = pci_physfn(vf_dev); + int val, ret; + + ret = kstrtoint(buf, 0, &val); + if (ret) + return ret; + + if (val < 0) + return -EINVAL; + + device_lock(&pdev->dev); + if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) { + ret = -EOPNOTSUPP; + goto err_pdev; + } + + device_lock(&vf_dev->dev); + if (vf_dev->driver) { + /* + * A driver is already attached to this VF and has configured + * itself based on the current MSI-X vector count. Changing + * the vector size could mess up the driver, so block it. + */ + ret = -EBUSY; + goto err_dev; + } + + ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val); + +err_dev: + device_unlock(&vf_dev->dev); +err_pdev: + device_unlock(&pdev->dev); + return ret ? 
: count; +} +static DEVICE_ATTR_WO(sriov_vf_msix_count); +#endif + +static struct attribute *sriov_vf_dev_attrs[] = { +#ifdef CONFIG_PCI_MSI + &dev_attr_sriov_vf_msix_count.attr, +#endif + NULL, +}; + +static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (!pdev->is_virtfn) + return 0; + + return a->mode; +} + +const struct attribute_group sriov_vf_dev_attr_group = { + .attrs = sriov_vf_dev_attrs, + .is_visible = sriov_vf_attrs_are_visible, +}; + int pci_iov_add_virtfn(struct pci_dev *dev, int id) { int i; @@ -400,18 +487,21 @@ static DEVICE_ATTR_RO(sriov_stride); static DEVICE_ATTR_RO(sriov_vf_device); static DEVICE_ATTR_RW(sriov_drivers_autoprobe); -static struct attribute *sriov_dev_attrs[] = { +static struct attribute *sriov_pf_dev_attrs[] = { &dev_attr_sriov_totalvfs.attr, &dev_attr_sriov_numvfs.attr, &dev_attr_sriov_offset.attr, &dev_attr_sriov_stride.attr, &dev_attr_sriov_vf_device.attr, &dev_attr_sriov_drivers_autoprobe.attr, +#ifdef CONFIG_PCI_MSI + &dev_attr_sriov_vf_total_msix.attr, +#endif NULL, }; -static umode_t sriov_attrs_are_visible(struct kobject *kobj, - struct attribute *a, int n) +static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); @@ -421,9 +511,9 @@ static umode_t sriov_attrs_are_visible(struct kobject *kobj, return a->mode; } -const struct attribute_group sriov_dev_attr_group = { - .attrs = sriov_dev_attrs, - .is_visible = sriov_attrs_are_visible, +const struct attribute_group sriov_pf_dev_attr_group = { + .attrs = sriov_pf_dev_attrs, + .is_visible = sriov_pf_attrs_are_visible, }; int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index f8afd54ca3e1..a6b8fbbba6d2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1567,7 +1567,8 @@ static const struct attribute_group *pci_dev_attr_groups[] = { &pci_dev_attr_group, &pci_dev_hp_attr_group, #ifdef CONFIG_PCI_IOV - &sriov_dev_attr_group, + &sriov_pf_dev_attr_group, + &sriov_vf_dev_attr_group, #endif &pci_bridge_attr_group, &pcie_dev_attr_group, diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index ef7c4661314f..afb87b917f07 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -501,7 +501,8 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); -extern const struct attribute_group sriov_dev_attr_group; +extern const struct attribute_group sriov_pf_dev_attr_group; +extern const struct attribute_group sriov_vf_dev_attr_group; #else static inline int pci_iov_init(struct pci_dev *dev) { diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 14e0551cd319..77fe65367ce5 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -12,8 +12,6 @@ #include <linux/phy/phy.h> #include <linux/phy/phy-mipi-dphy.h> -#define PSEC_PER_SEC 1000000000000LL - /* * Minimum D-PHY timings based on MIPI D-PHY specification. 
Derived * from the valid ranges specified in Section 6.9, Table 14, Page 41 diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index 8af8c6c5cc02..347dc79a18c1 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -11,16 +11,16 @@ #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/init.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> +#include <linux/time64.h> + #include <linux/phy/phy.h> #include <linux/phy/phy-mipi-dphy.h> -#include <linux/pm_runtime.h> -#include <linux/mfd/syscon.h> - -#define PSEC_PER_SEC 1000000000000LL #define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l))) diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 75463c2e2b86..fa636951169e 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -1404,8 +1404,8 @@ static int idtcm_set_pll_mode(struct idtcm_channel *channel, /* PTP Hardware Clock interface */ -/** - * @brief Maximum absolute value for write phase offset in picoseconds +/* + * Maximum absolute value for write phase offset in picoseconds * * Destination signed register is 32-bit register in resolution of 50ps * diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index ce10ecd41ba0..a17e8cc642c5 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/ptp_clock_kernel.h> +#include <linux/ptp_pch.h> #include <linux/slab.h> #define STATION_ADDR_LEN 20 @@ -36,7 +37,8 @@ enum pch_status { PCH_FAILED, PCH_UNSUPPORTED, }; -/** + +/* * struct pch_ts_regs - IEEE 1588 registers */ struct pch_ts_regs { @@ -102,7 +104,8 @@ struct pch_ts_regs { #define PCH_IEEE1588_ETH (1 << 0) #define PCH_IEEE1588_CAN (1 << 1) -/** + +/* * struct pch_dev - Driver private data */ struct pch_dev { @@ -119,7 +122,7 @@ struct pch_dev { spinlock_t register_lock; }; -/** +/* * struct pch_params - 1588 module parameter */ struct pch_params { @@ -179,17 +182,6 @@ static inline void pch_block_reset(struct pch_dev *chip) iowrite32(val, (&chip->regs->control)); } -u32 pch_ch_control_read(struct pci_dev *pdev) -{ - struct pch_dev *chip = pci_get_drvdata(pdev); - u32 val; - - val = ioread32(&chip->regs->ch_control); - - return val; -} -EXPORT_SYMBOL(pch_ch_control_read); - void pch_ch_control_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); @@ -296,6 +288,7 @@ static void pch_reset(struct pch_dev *chip) * IEEE 1588 hardware when looking at PTP * traffic on the ethernet interface * @addr: dress which contain the column separated address to be used. + * @pdev: PCI device. 
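Further up, the drivers/pci/iov.c hunk adds a per-VF sriov_vf_msix_count attribute and a PF-level sriov_vf_total_msix attribute that call back into the bound PF driver. A sketch of the driver side follows; every foo_* name is invented, and only the two .sriov_* pci_driver hooks are taken from the patch.

#include <linux/module.h>
#include <linux/pci.h>

/* Assumed device-specific way of asking firmware to (re)provision
 * MSI-X vectors for a VF; purely illustrative.
 */
static int foo_fw_set_vf_msix(struct pci_dev *vf, int count)
{
	return 0;
}

static u32 foo_get_vf_total_msix(struct pci_dev *pf)
{
	/* size of the MSI-X pool the PF can distribute among its VFs */
	return 64;
}

static int foo_set_msix_vec_count(struct pci_dev *vf, int count)
{
	/* called with no driver bound to the VF, see sriov_vf_msix_count_store() */
	return foo_fw_set_vf_msix(vf, count);
}

static struct pci_driver foo_pf_driver = {
	.name = "foo_pf",
	/* .id_table, .probe, .remove, .sriov_configure ... */
	.sriov_get_vf_total_msix = foo_get_vf_total_msix,
	.sriov_set_msix_vec_count = foo_set_msix_vec_count,
};
module_pci_driver(foo_pf_driver);
MODULE_LICENSE("GPL");

With such a driver bound to the PF, writing a vector count to the VF's sriov_vf_msix_count attribute reaches foo_set_msix_vec_count() while the VF itself is unbound.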
*/ int pch_set_station_address(u8 *addr, struct pci_dev *pdev) { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 175b82b98f36..a1f08e9aa064 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1429,7 +1429,7 @@ static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf) static void qeth_tx_complete_pending_bufs(struct qeth_card *card, struct qeth_qdio_out_q *queue, - bool drain) + bool drain, int budget) { struct qeth_qdio_out_buffer *buf, *tmp; @@ -1441,7 +1441,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card, if (drain) qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); - qeth_tx_complete_buf(buf, drain, 0); + qeth_tx_complete_buf(buf, drain, budget); list_del(&buf->list_entry); qeth_free_out_buf(buf); @@ -1453,7 +1453,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) { int j; - qeth_tx_complete_pending_bufs(q->card, q, true); + qeth_tx_complete_pending_bufs(q->card, q, true, 0); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) @@ -2566,11 +2566,12 @@ static int qeth_ulp_setup(struct qeth_card *card) return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); } -static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) +static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, + gfp_t gfp) { struct qeth_qdio_out_buffer *newbuf; - newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); + newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp); if (!newbuf) return -ENOMEM; @@ -2605,7 +2606,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) goto err_qdio_bufs; for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { - if (qeth_init_qdio_out_buf(q, i)) + if (qeth_alloc_out_buf(q, i, GFP_KERNEL)) goto err_out_bufs; } @@ -6080,7 +6081,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, /* Prepare the queue slot for immediate re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); - if (qeth_init_qdio_out_buf(queue, bidx)) { + if (qeth_alloc_out_buf(queue, bidx, + GFP_ATOMIC)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); } @@ -6144,7 +6146,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) unsigned int bytes = 0; int completed; - qeth_tx_complete_pending_bufs(card, queue, false); + qeth_tx_complete_pending_bufs(card, queue, false, budget); if (qeth_out_queue_is_empty(queue)) { napi_complete(napi); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index dd441eaec66e..d308ff744a29 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1098,8 +1098,9 @@ walk_ipv6: tmp.disp_flag = QETH_DISP_ADDR_ADD; tmp.is_multicast = 1; - read_lock_bh(&in6_dev->lock); - for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { + for (im6 = rtnl_dereference(in6_dev->mc_list); + im6; + im6 = rtnl_dereference(im6->next)) { tmp.u.a6.addr = im6->mca_addr; ipm = qeth_l3_find_addr_by_ip(card, &tmp); @@ -1117,30 +1118,11 @@ walk_ipv6: qeth_l3_ipaddr_hash(ipm)); } - read_unlock_bh(&in6_dev->lock); out: return 0; } -static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT_(card, 4, "aid:%d", vid); - return 0; -} - -static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT_(card, 4, "kid:%d", vid); - return 0; -} - 
static void qeth_l3_set_promisc_mode(struct qeth_card *card) { bool enable = card->dev->flags & IFF_PROMISC; @@ -1861,8 +1843,6 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_do_ioctl = qeth_do_ioctl, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, - .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, }; @@ -1878,8 +1858,6 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_do_ioctl = qeth_do_ioctl, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, - .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, .ndo_neigh_setup = qeth_l3_neigh_setup, }; @@ -1933,8 +1911,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->needed_headroom = headroom; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_RX; netif_keep_dst(card->dev); if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO deleted file mode 100644 index 78dc863eff4f..000000000000 --- a/drivers/scsi/aacraid/TODO +++ /dev/null @@ -1,3 +0,0 @@ -o Testing -o More testing -o I/O size increase diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 808e78d6cd98..b7ae5bdc4eb5 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -76,8 +76,6 @@ source "drivers/staging/clocking-wizard/Kconfig" source "drivers/staging/fbtft/Kconfig" -source "drivers/staging/fsl-dpaa2/Kconfig" - source "drivers/staging/most/Kconfig" source "drivers/staging/ks7010/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 5a871f0ff2f4..075c979bfe7c 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -28,7 +28,6 @@ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ obj-$(CONFIG_UNISYSSPAR) += unisys/ obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/ obj-$(CONFIG_FB_TFT) += fbtft/ -obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ obj-$(CONFIG_MOST) += most/ obj-$(CONFIG_KS7010) += ks7010/ obj-$(CONFIG_GREYBUS) += greybus/ diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig deleted file mode 100644 index 244237bb068a..000000000000 --- a/drivers/staging/fsl-dpaa2/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers -# - -config FSL_DPAA2 - bool "Freescale DPAA2 devices" - depends on FSL_MC_BUS - help - Build drivers for Freescale DataPath Acceleration - Architecture (DPAA2) family of SoCs. - -config FSL_DPAA2_ETHSW - tristate "Freescale DPAA2 Ethernet Switch" - depends on FSL_DPAA2 - depends on NET_SWITCHDEV - help - Driver for Freescale DPAA2 Ethernet Switch. Select - BRIDGE to have support for bridge tools. 
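Back in the drivers/of/of_net.c hunk, of_get_mac_address() now fills a caller-provided six-byte buffer and returns 0 or a negative errno instead of handing back a pointer. A minimal sketch of the new calling convention; the surrounding helper and the random-address fallback are illustrative, not part of the patch.

#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

/* np is the device's DT node, ndev an already-allocated net_device. */
static void example_assign_mac(struct net_device *ndev, struct device_node *np)
{
	u8 addr[ETH_ALEN];

	if (!of_get_mac_address(np, addr))
		ether_addr_copy(ndev->dev_addr, addr);	/* MAC found in DT or nvmem */
	else
		eth_hw_addr_random(ndev);		/* fall back to a random address */
}

Compared with the old pointer-returning version, the caller no longer needs to care about the lifetime of memory returned from the nvmem path.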
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile deleted file mode 100644 index 9645db7689c9..000000000000 --- a/drivers/staging/fsl-dpaa2/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers -# - -obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/ diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile deleted file mode 100644 index f6f2cf798faf..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the Freescale DPAA2 Ethernet Switch -# -# Copyright 2014-2017 Freescale Semiconductor Inc. -# Copyright 2017-2018 NXP - -obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o - -dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o diff --git a/drivers/staging/fsl-dpaa2/ethsw/README b/drivers/staging/fsl-dpaa2/ethsw/README deleted file mode 100644 index b48dcbf7c5fb..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/README +++ /dev/null @@ -1,106 +0,0 @@ -DPAA2 Ethernet Switch driver -============================ - -This file provides documentation for the DPAA2 Ethernet Switch driver - - -Contents -======== - Supported Platforms - Architecture Overview - Creating an Ethernet Switch - Features - - - Supported Platforms -=================== -This driver provides networking support for Freescale LS2085A, LS2088A -DPAA2 SoCs. - - -Architecture Overview -===================== -The Ethernet Switch in the DPAA2 architecture consists of several hardware -resources that provide the functionality. These are allocated and -configured via the Management Complex (MC) portals. MC abstracts most of -these resources as DPAA2 objects and exposes ABIs through which they can -be configured and controlled. - -For a more detailed description of the DPAA2 architecture and its object -abstractions see: - drivers/staging/fsl-mc/README.txt - -The Ethernet Switch is built on top of a Datapath Switch (DPSW) object. - -Configuration interface: - - --------------------- - | DPAA2 Switch driver | - --------------------- - . - . - ---------- - | DPSW API | - ---------- - . software - ================= . ============== - . hardware - --------------------- - | MC hardware portals | - --------------------- - . - . - ------ - | DPSW | - ------ - -Driver uses the switch device driver model and exposes each switch port as -a network interface, which can be included in a bridge. Traffic switched -between ports is offloaded into the hardware. Exposed network interfaces -are not used for I/O, they are used just for configuration. This -limitation is going to be addressed in the future. - -The DPSW can have ports connected to DPNIs or to PHYs via DPMACs. - - - [ethA] [ethB] [ethC] [ethD] [ethE] [ethF] - : : : : : : - : : : : : : -[eth drv] [eth drv] [ ethsw drv ] - : : : : : : kernel -======================================================================== - : : : : : : hardware - [DPNI] [DPNI] [============= DPSW =================] - | | | | | | - | ---------- | [DPMAC] [DPMAC] - ------------------------------- | | - | | - [PHY] [PHY] - -For a more detailed description of the Ethernet switch device driver model -see: - Documentation/networking/switchdev.rst - -Creating an Ethernet Switch -=========================== -A device is created for the switch objects probed on the MC bus. 
Each DPSW -has a number of properties which determine the configuration options and -associated hardware resources. - -A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can -be added to a container on the MC bus in one of two ways: statically, -through a Datapath Layout Binary file (DPL) that is parsed by MC at boot -time; or created dynamically at runtime, via the DPAA2 objects APIs. - -Features -======== -Driver configures DPSW to perform hardware switching offload of -unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its -ports. - -It allows configuration of hardware learning, flooding, multicast groups, -port VLAN configuration and STP state. - -Static entries can be added/removed from the FDB. - -Hardware statistics for each port are provided through ethtool -S option. diff --git a/drivers/staging/fsl-dpaa2/ethsw/TODO b/drivers/staging/fsl-dpaa2/ethsw/TODO deleted file mode 100644 index 4d46857b0b2b..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/TODO +++ /dev/null @@ -1,13 +0,0 @@ -* Add I/O capabilities on switch port netdevices. This will allow control -traffic to reach the CPU. -* Add ACL to redirect control traffic to CPU. -* Add support for multiple FDBs and switch port partitioning -* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver -need to be kept in sync with binary interface changes in MC -* refine README file -* cleanup - -NOTE: At least first three of the above are required before getting the -DPAA2 Ethernet Switch driver out of staging. Another requirement is that -dpio driver is moved to drivers/soc (this is required for I/O). - diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h deleted file mode 100644 index 9cfd8a8e0197..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h +++ /dev/null @@ -1,594 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright 2017-2018 NXP - * - */ - -#ifndef __FSL_DPSW_H -#define __FSL_DPSW_H - -/* Data Path L2-Switch API - * Contains API for handling DPSW topology and functionality - */ - -struct fsl_mc_io; - -/** - * DPSW general definitions - */ - -/** - * Maximum number of traffic class priorities - */ -#define DPSW_MAX_PRIORITIES 8 -/** - * Maximum number of interfaces - */ -#define DPSW_MAX_IF 64 - -int dpsw_open(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int dpsw_id, - u16 *token); - -int dpsw_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -/** - * DPSW options - */ - -/** - * Disable flooding - */ -#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL -/** - * Disable Multicast - */ -#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL -/** - * Support control interface - */ -#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL -/** - * Disable flooding metering - */ -#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL -/** - * Enable metering - */ -#define DPSW_OPT_METERING_EN 0x0000000000000040ULL - -/** - * enum dpsw_component_type - component type of a bridge - * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an - * enterprise VLAN bridge or of a Provider Bridge used - * to process C-tagged frames - * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a - * Provider Bridge - * - */ -enum dpsw_component_type { - DPSW_COMPONENT_TYPE_C_VLAN = 0, - DPSW_COMPONENT_TYPE_S_VLAN -}; - -int dpsw_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dpsw_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dpsw_reset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -/** - * DPSW IRQ Index and Events - */ - -#define DPSW_IRQ_INDEX_IF 0x0000 -#define DPSW_IRQ_INDEX_L2SW 0x0001 - -/** - * IRQ event - Indicates that the link state changed - */ -#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 - -/** - * struct dpsw_irq_cfg - IRQ configuration - * @addr: Address that must be written to signal a message-based interrupt - * @val: Value to write into irq_addr address - * @irq_num: A user defined number associated with this IRQ - */ -struct dpsw_irq_cfg { - u64 addr; - u32 val; - int irq_num; -}; - -int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en); - -int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask); - -int dpsw_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status); - -int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status); - -/** - * struct dpsw_attr - Structure representing DPSW attributes - * @id: DPSW object ID - * @options: Enable/Disable DPSW features - * @max_vlans: Maximum Number of VLANs - * @max_meters_per_if: Number of meters per interface - * @max_fdbs: Maximum Number of FDBs - * @max_fdb_entries: Number of FDB entries for default FDB table; - * 0 - indicates default 1024 entries. 
- * @fdb_aging_time: Default FDB aging time for default FDB table; - * 0 - indicates default 300 seconds - * @max_fdb_mc_groups: Number of multicast groups in each FDB table; - * 0 - indicates default 32 - * @mem_size: DPSW frame storage memory size - * @num_ifs: Number of interfaces - * @num_vlans: Current number of VLANs - * @num_fdbs: Current number of FDBs - * @component_type: Component type of this bridge - */ -struct dpsw_attr { - int id; - u64 options; - u16 max_vlans; - u8 max_meters_per_if; - u8 max_fdbs; - u16 max_fdb_entries; - u16 fdb_aging_time; - u16 max_fdb_mc_groups; - u16 num_ifs; - u16 mem_size; - u16 num_vlans; - u8 num_fdbs; - enum dpsw_component_type component_type; -}; - -int dpsw_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dpsw_attr *attr); - -/** - * enum dpsw_action - Action selection for special/control frames - * @DPSW_ACTION_DROP: Drop frame - * @DPSW_ACTION_REDIRECT: Redirect frame to control port - */ -enum dpsw_action { - DPSW_ACTION_DROP = 0, - DPSW_ACTION_REDIRECT = 1 -}; - -/** - * Enable auto-negotiation - */ -#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL -/** - * Enable half-duplex mode - */ -#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -/** - * Enable pause frames - */ -#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL -/** - * Enable a-symmetric pause frames - */ -#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL - -/** - * struct dpsw_link_cfg - Structure representing DPSW link configuration - * @rate: Rate - * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values - */ -struct dpsw_link_cfg { - u32 rate; - u64 options; -}; - -int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_link_cfg *cfg); -/** - * struct dpsw_link_state - Structure representing DPSW link state - * @rate: Rate - * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values - * @up: 0 - covers two cases: down and disconnected, 1 - up - */ -struct dpsw_link_state { - u32 rate; - u64 options; - u8 up; -}; - -int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_link_state *state); - -int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en); - -int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en); - -/** - * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration - * @pcp: Priority Code Point (PCP): a 3-bit field which refers - * to the IEEE 802.1p priority - * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used - * separately or in conjunction with PCP to indicate frames - * eligible to be dropped in the presence of congestion - * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN - * to which the frame belongs. 
The hexadecimal values - * of 0x000 and 0xFFF are reserved; - * all other values may be used as VLAN identifiers, - * allowing up to 4,094 VLANs - */ -struct dpsw_tci_cfg { - u8 pcp; - u8 dei; - u16 vlan_id; -}; - -int dpsw_if_set_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - const struct dpsw_tci_cfg *cfg); - -int dpsw_if_get_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_tci_cfg *cfg); - -/** - * enum dpsw_stp_state - Spanning Tree Protocol (STP) states - * @DPSW_STP_STATE_BLOCKING: Blocking state - * @DPSW_STP_STATE_LISTENING: Listening state - * @DPSW_STP_STATE_LEARNING: Learning state - * @DPSW_STP_STATE_FORWARDING: Forwarding state - * - */ -enum dpsw_stp_state { - DPSW_STP_STATE_DISABLED = 0, - DPSW_STP_STATE_LISTENING = 1, - DPSW_STP_STATE_LEARNING = 2, - DPSW_STP_STATE_FORWARDING = 3, - DPSW_STP_STATE_BLOCKING = 0 -}; - -/** - * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration - * @vlan_id: VLAN ID STP state - * @state: STP state - */ -struct dpsw_stp_cfg { - u16 vlan_id; - enum dpsw_stp_state state; -}; - -int dpsw_if_set_stp(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - const struct dpsw_stp_cfg *cfg); - -/** - * enum dpsw_accepted_frames - Types of frames to accept - * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and - * priority tagged frames - * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or - * Priority-Tagged frames received on this interface. - * - */ -enum dpsw_accepted_frames { - DPSW_ADMIT_ALL = 1, - DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 -}; - -/** - * enum dpsw_counter - Counters types - * @DPSW_CNT_ING_FRAME: Counts ingress frames - * @DPSW_CNT_ING_BYTE: Counts ingress bytes - * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames - * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame - * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames - * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes - * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames - * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes - * @DPSW_CNT_EGR_FRAME: Counts egress frames - * @DPSW_CNT_EGR_BYTE: Counts egress bytes - * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames - * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames - * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames - */ -enum dpsw_counter { - DPSW_CNT_ING_FRAME = 0x0, - DPSW_CNT_ING_BYTE = 0x1, - DPSW_CNT_ING_FLTR_FRAME = 0x2, - DPSW_CNT_ING_FRAME_DISCARD = 0x3, - DPSW_CNT_ING_MCAST_FRAME = 0x4, - DPSW_CNT_ING_MCAST_BYTE = 0x5, - DPSW_CNT_ING_BCAST_FRAME = 0x6, - DPSW_CNT_ING_BCAST_BYTES = 0x7, - DPSW_CNT_EGR_FRAME = 0x8, - DPSW_CNT_EGR_BYTE = 0x9, - DPSW_CNT_EGR_FRAME_DISCARD = 0xa, - DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb, - DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc, -}; - -int dpsw_if_get_counter(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - enum dpsw_counter type, - u64 *counter); - -int dpsw_if_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id); - -int dpsw_if_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id); - -int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u16 frame_length); - -/** - * struct dpsw_vlan_cfg - VLAN Configuration - * @fdb_id: Forwarding Data Base - */ -struct dpsw_vlan_cfg { - u16 fdb_id; -}; - -int dpsw_vlan_add(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - 
u16 vlan_id, - const struct dpsw_vlan_cfg *cfg); - -/** - * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces - * @num_ifs: The number of interfaces that are assigned to the egress - * list for this VLAN - * @if_id: The set of interfaces that are - * assigned to the egress list for this VLAN - */ -struct dpsw_vlan_if_cfg { - u16 num_ifs; - u16 if_id[DPSW_MAX_IF]; -}; - -int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_remove(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id); - -/** - * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic - * @DPSW_FDB_ENTRY_STATIC: Static entry - * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry - */ -enum dpsw_fdb_entry_type { - DPSW_FDB_ENTRY_STATIC = 0, - DPSW_FDB_ENTRY_DINAMIC = 1 -}; - -/** - * struct dpsw_fdb_unicast_cfg - Unicast entry configuration - * @type: Select static or dynamic entry - * @mac_addr: MAC address - * @if_egress: Egress interface ID - */ -struct dpsw_fdb_unicast_cfg { - enum dpsw_fdb_entry_type type; - u8 mac_addr[6]; - u16 if_egress; -}; - -int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg); - -int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg); - -#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0) -#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1) - -/** - * struct fdb_dump_entry - fdb snapshot entry - * @mac_addr: MAC address - * @type: bit0 - DINAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0) - * @if_info: unicast - egress interface, multicast - number of egress interfaces - * @if_mask: multicast - egress interface mask - */ -struct fdb_dump_entry { - u8 mac_addr[6]; - u8 type; - u8 if_info; - u8 if_mask[8]; -}; - -int dpsw_fdb_dump(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - u64 iova_addr, - u32 iova_size, - u16 *num_entries); - -/** - * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration - * @type: Select static or dynamic entry - * @mac_addr: MAC address - * @num_ifs: Number of external and internal interfaces - * @if_id: Egress interface IDs - */ -struct dpsw_fdb_multicast_cfg { - enum dpsw_fdb_entry_type type; - u8 mac_addr[6]; - u16 num_ifs; - u16 if_id[DPSW_MAX_IF]; -}; - -int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg); - -int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg); - -/** - * enum dpsw_fdb_learning_mode - Auto-learning modes - * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning - * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning - * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable None secure learning by CPU - * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU - * - * NONE - SECURE LEARNING - * SMAC found DMAC found CTLU Action - * v v Forward frame to - * 1. 
DMAC destination - * - v Forward frame to - * 1. DMAC destination - * 2. Control interface - * v - Forward frame to - * 1. Flooding list of interfaces - * - - Forward frame to - * 1. Flooding list of interfaces - * 2. Control interface - * SECURE LEARING - * SMAC found DMAC found CTLU Action - * v v Forward frame to - * 1. DMAC destination - * - v Forward frame to - * 1. Control interface - * v - Forward frame to - * 1. Flooding list of interfaces - * - - Forward frame to - * 1. Control interface - */ -enum dpsw_fdb_learning_mode { - DPSW_FDB_LEARNING_MODE_DIS = 0, - DPSW_FDB_LEARNING_MODE_HW = 1, - DPSW_FDB_LEARNING_MODE_NON_SECURE = 2, - DPSW_FDB_LEARNING_MODE_SECURE = 3 -}; - -int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - enum dpsw_fdb_learning_mode mode); - -/** - * struct dpsw_fdb_attr - FDB Attributes - * @max_fdb_entries: Number of FDB entries - * @fdb_aging_time: Aging time in seconds - * @learning_mode: Learning mode - * @num_fdb_mc_groups: Current number of multicast groups - * @max_fdb_mc_groups: Maximum number of multicast groups - */ -struct dpsw_fdb_attr { - u16 max_fdb_entries; - u16 fdb_aging_time; - enum dpsw_fdb_learning_mode learning_mode; - u16 num_fdb_mc_groups; - u16 max_fdb_mc_groups; -}; - -int dpsw_get_api_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 *major_ver, - u16 *minor_ver); - -int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, - u16 if_id, u8 mac_addr[6]); - -int dpsw_if_get_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]); - -int dpsw_if_set_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]); - -#endif /* __FSL_DPSW_H */ diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c deleted file mode 100644 index 703055e063ff..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c +++ /dev/null @@ -1,1839 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DPAA2 Ethernet Switch driver - * - * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2017-2018 NXP - * - */ - -#include <linux/module.h> - -#include <linux/interrupt.h> -#include <linux/msi.h> -#include <linux/kthread.h> -#include <linux/workqueue.h> - -#include <linux/fsl/mc.h> - -#include "ethsw.h" - -/* Minimal supported DPSW version */ -#define DPSW_MIN_VER_MAJOR 8 -#define DPSW_MIN_VER_MINOR 1 - -#define DEFAULT_VLAN_ID 1 - -static int dpaa2_switch_add_vlan(struct ethsw_core *ethsw, u16 vid) -{ - int err; - - struct dpsw_vlan_cfg vcfg = { - .fdb_id = 0, - }; - - err = dpsw_vlan_add(ethsw->mc_io, 0, - ethsw->dpsw_handle, vid, &vcfg); - if (err) { - dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); - return err; - } - ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; - - return 0; -} - -static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) -{ - struct net_device *netdev = port_priv->netdev; - struct dpsw_link_state state; - int err; - - err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, &state); - if (err) { - netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); - return true; - } - - WARN_ONCE(state.up > 1, "Garbage read into link_state"); - - return state.up ? 
true : false; -} - -static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *netdev = port_priv->netdev; - struct dpsw_tci_cfg tci_cfg = { 0 }; - bool up; - int err, ret; - - err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->idx, &tci_cfg); - if (err) { - netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); - return err; - } - - tci_cfg.vlan_id = pvid; - - /* Interface needs to be down to change PVID */ - up = dpaa2_switch_port_is_up(port_priv); - if (up) { - err = dpsw_if_disable(ethsw->mc_io, 0, - ethsw->dpsw_handle, - port_priv->idx); - if (err) { - netdev_err(netdev, "dpsw_if_disable err %d\n", err); - return err; - } - } - - err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->idx, &tci_cfg); - if (err) { - netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); - goto set_tci_error; - } - - /* Delete previous PVID info and mark the new one */ - port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; - port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; - port_priv->pvid = pvid; - -set_tci_error: - if (up) { - ret = dpsw_if_enable(ethsw->mc_io, 0, - ethsw->dpsw_handle, - port_priv->idx); - if (ret) { - netdev_err(netdev, "dpsw_if_enable err %d\n", ret); - return ret; - } - } - - return err; -} - -static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, - u16 vid, u16 flags) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *netdev = port_priv->netdev; - struct dpsw_vlan_if_cfg vcfg; - int err; - - if (port_priv->vlans[vid]) { - netdev_warn(netdev, "VLAN %d already configured\n", vid); - return -EEXIST; - } - - vcfg.num_ifs = 1; - vcfg.if_id[0] = port_priv->idx; - err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); - if (err) { - netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); - return err; - } - - port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; - - if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { - err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, - ethsw->dpsw_handle, - vid, &vcfg); - if (err) { - netdev_err(netdev, - "dpsw_vlan_add_if_untagged err %d\n", err); - return err; - } - port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; - } - - if (flags & BRIDGE_VLAN_INFO_PVID) { - err = dpaa2_switch_port_set_pvid(port_priv, vid); - if (err) - return err; - } - - return 0; -} - -static int dpaa2_switch_set_learning(struct ethsw_core *ethsw, bool enable) -{ - enum dpsw_fdb_learning_mode learn_mode; - int err; - - if (enable) - learn_mode = DPSW_FDB_LEARNING_MODE_HW; - else - learn_mode = DPSW_FDB_LEARNING_MODE_DIS; - - err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0, - learn_mode); - if (err) { - dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err); - return err; - } - ethsw->learning = enable; - - return 0; -} - -static int dpaa2_switch_port_set_flood(struct ethsw_port_priv *port_priv, bool enable) -{ - int err; - - err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, enable); - if (err) { - netdev_err(port_priv->netdev, - "dpsw_if_set_flooding err %d\n", err); - return err; - } - port_priv->flood = enable; - - return 0; -} - -static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) -{ - struct dpsw_stp_cfg stp_cfg = { - .state = state, - }; - int err; - u16 vid; - - if (!netif_running(port_priv->netdev) || state == port_priv->stp_state) - return 0; /* Nothing to do */ - - for (vid = 0; 
vid <= VLAN_VID_MASK; vid++) { - if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { - stp_cfg.vlan_id = vid; - err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, &stp_cfg); - if (err) { - netdev_err(port_priv->netdev, - "dpsw_if_set_stp err %d\n", err); - return err; - } - } - } - - port_priv->stp_state = state; - - return 0; -} - -static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) -{ - struct ethsw_port_priv *ppriv_local = NULL; - int i, err; - - if (!ethsw->vlans[vid]) - return -ENOENT; - - err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); - if (err) { - dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); - return err; - } - ethsw->vlans[vid] = 0; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - ppriv_local = ethsw->ports[i]; - ppriv_local->vlans[vid] = 0; - } - - return 0; -} - -static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_unicast_cfg entry = {0}; - int err; - - entry.if_egress = port_priv->idx; - entry.type = DPSW_FDB_ENTRY_STATIC; - ether_addr_copy(entry.mac_addr, addr); - - err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - if (err) - netdev_err(port_priv->netdev, - "dpsw_fdb_add_unicast err %d\n", err); - return err; -} - -static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_unicast_cfg entry = {0}; - int err; - - entry.if_egress = port_priv->idx; - entry.type = DPSW_FDB_ENTRY_STATIC; - ether_addr_copy(entry.mac_addr, addr); - - err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - /* Silently discard error for calling multiple times the del command */ - if (err && err != -ENXIO) - netdev_err(port_priv->netdev, - "dpsw_fdb_remove_unicast err %d\n", err); - return err; -} - -static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_multicast_cfg entry = {0}; - int err; - - ether_addr_copy(entry.mac_addr, addr); - entry.type = DPSW_FDB_ENTRY_STATIC; - entry.num_ifs = 1; - entry.if_id[0] = port_priv->idx; - - err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - /* Silently discard error for calling multiple times the add command */ - if (err && err != -ENXIO) - netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", - err); - return err; -} - -static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_multicast_cfg entry = {0}; - int err; - - ether_addr_copy(entry.mac_addr, addr); - entry.type = DPSW_FDB_ENTRY_STATIC; - entry.num_ifs = 1; - entry.if_id[0] = port_priv->idx; - - err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - /* Silently discard error for calling multiple times the del command */ - if (err && err != -ENAVAIL) - netdev_err(port_priv->netdev, - "dpsw_fdb_remove_multicast err %d\n", err); - return err; -} - -static int dpaa2_switch_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid, u16 flags, - struct netlink_ext_ack *extack) -{ - if (is_unicast_ether_addr(addr)) - return dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), - addr); - else - return 
dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), - addr); -} - -static int dpaa2_switch_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - if (is_unicast_ether_addr(addr)) - return dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), - addr); - else - return dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), - addr); -} - -static void dpaa2_switch_port_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - u64 tmp; - int err; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_FRAME, &stats->rx_packets); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_EGR_FRAME, &stats->tx_packets); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_BYTE, &stats->rx_bytes); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_EGR_BYTE, &stats->tx_bytes); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_FRAME_DISCARD, - &stats->rx_dropped); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_FLTR_FRAME, - &tmp); - if (err) - goto error; - stats->rx_dropped += tmp; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_EGR_FRAME_DISCARD, - &stats->tx_dropped); - if (err) - goto error; - - return; - -error: - netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); -} - -static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, - int attr_id) -{ - return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); -} - -static int dpaa2_switch_port_get_offload_stats(int attr_id, - const struct net_device *netdev, - void *sp) -{ - switch (attr_id) { - case IFLA_OFFLOAD_XSTATS_CPU_HIT: - dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); - return 0; - } - - return -EINVAL; -} - -static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, - 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - (u16)ETHSW_L2_MAX_FRM(mtu)); - if (err) { - netdev_err(netdev, - "dpsw_if_set_max_frame_length() err %d\n", err); - return err; - } - - netdev->mtu = mtu; - return 0; -} - -static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - struct dpsw_link_state state; - int err; - - /* Interrupts are received even though no one issued an 'ifconfig up' - * on the switch interface. 
Ignore these link state update interrupts - */ - if (!netif_running(netdev)) - return 0; - - err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, &state); - if (err) { - netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); - return err; - } - - WARN_ONCE(state.up > 1, "Garbage read into link_state"); - - if (state.up != port_priv->link_state) { - if (state.up) - netif_carrier_on(netdev); - else - netif_carrier_off(netdev); - port_priv->link_state = state.up; - } - - return 0; -} - -static int dpaa2_switch_port_open(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - /* No need to allow Tx as control interface is disabled */ - netif_tx_stop_all_queues(netdev); - - /* Explicitly set carrier off, otherwise - * netif_carrier_ok() will return true and cause 'ip link show' - * to report the LOWER_UP flag, even though the link - * notification wasn't even received. - */ - netif_carrier_off(netdev); - - err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - if (err) { - netdev_err(netdev, "dpsw_if_enable err %d\n", err); - return err; - } - - /* sync carrier state */ - err = dpaa2_switch_port_carrier_state_sync(netdev); - if (err) { - netdev_err(netdev, - "dpaa2_switch_port_carrier_state_sync err %d\n", err); - goto err_carrier_sync; - } - - return 0; - -err_carrier_sync: - dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - return err; -} - -static int dpaa2_switch_port_stop(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - if (err) { - netdev_err(netdev, "dpsw_if_disable err %d\n", err); - return err; - } - - return 0; -} - -static netdev_tx_t dpaa2_switch_port_dropframe(struct sk_buff *skb, - struct net_device *netdev) -{ - /* we don't support I/O for now, drop the frame */ - dev_kfree_skb_any(skb); - - return NETDEV_TX_OK; -} - -static int dpaa2_switch_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct ethsw_port_priv *port_priv = netdev_priv(dev); - - ppid->id_len = 1; - ppid->id[0] = port_priv->ethsw_data->dev_id; - - return 0; -} - -static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, - size_t len) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - err = snprintf(name, len, "p%d", port_priv->idx); - if (err >= len) - return -EINVAL; - - return 0; -} - -struct ethsw_dump_ctx { - struct net_device *dev; - struct sk_buff *skb; - struct netlink_callback *cb; - int idx; -}; - -static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, - struct ethsw_dump_ctx *dump) -{ - int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; - u32 portid = NETLINK_CB(dump->cb->skb).portid; - u32 seq = dump->cb->nlh->nlmsg_seq; - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - if (dump->idx < dump->cb->args[2]) - goto skip; - - nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, - sizeof(*ndm), NLM_F_MULTI); - if (!nlh) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = NTF_SELF; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dump->dev->ifindex; - ndm->ndm_state = is_dynamic ? 
NUD_REACHABLE : NUD_NOARP; - - if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) - goto nla_put_failure; - - nlmsg_end(dump->skb, nlh); - -skip: - dump->idx++; - return 0; - -nla_put_failure: - nlmsg_cancel(dump->skb, nlh); - return -EMSGSIZE; -} - -static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, - struct ethsw_port_priv *port_priv) -{ - int idx = port_priv->idx; - int valid; - - if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) - valid = entry->if_info == port_priv->idx; - else - valid = entry->if_mask[idx / 8] & BIT(idx % 8); - - return valid; -} - -static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *net_dev, - struct net_device *filter_dev, int *idx) -{ - struct ethsw_port_priv *port_priv = netdev_priv(net_dev); - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct device *dev = net_dev->dev.parent; - struct fdb_dump_entry *fdb_entries; - struct fdb_dump_entry fdb_entry; - struct ethsw_dump_ctx dump = { - .dev = net_dev, - .skb = skb, - .cb = cb, - .idx = *idx, - }; - dma_addr_t fdb_dump_iova; - u16 num_fdb_entries; - u32 fdb_dump_size; - int err = 0, i; - u8 *dma_mem; - - fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); - dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); - if (!dma_mem) - return -ENOMEM; - - fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, fdb_dump_iova)) { - netdev_err(net_dev, "dma_map_single() failed\n"); - err = -ENOMEM; - goto err_map; - } - - err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, 0, - fdb_dump_iova, fdb_dump_size, &num_fdb_entries); - if (err) { - netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); - goto err_dump; - } - - dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); - - fdb_entries = (struct fdb_dump_entry *)dma_mem; - for (i = 0; i < num_fdb_entries; i++) { - fdb_entry = fdb_entries[i]; - - if (!dpaa2_switch_port_fdb_valid_entry(&fdb_entry, port_priv)) - continue; - - err = dpaa2_switch_fdb_dump_nl(&fdb_entry, &dump); - if (err) - goto end; - } - -end: - *idx = dump.idx; - - kfree(dma_mem); - - return 0; - -err_dump: - dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); -err_map: - kfree(dma_mem); - return err; -} - -static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *net_dev = port_priv->netdev; - struct device *dev = net_dev->dev.parent; - u8 mac_addr[ETH_ALEN]; - int err; - - if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) - return 0; - - /* Get firmware address, if any */ - err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->idx, mac_addr); - if (err) { - dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); - return err; - } - - /* First check if firmware has any address configured by bootloader */ - if (!is_zero_ether_addr(mac_addr)) { - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); - } else { - /* No MAC address configured, fill in net_dev->dev_addr - * with a random one - */ - eth_hw_addr_random(net_dev); - dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); - - /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all - * practical purposes, this will be our "permanent" mac address, - * at least until the next reboot. This move will also permit - * register_netdevice() to properly fill up net_dev->perm_addr. 
- */ - net_dev->addr_assign_type = NET_ADDR_PERM; - } - - return 0; -} - -static const struct net_device_ops dpaa2_switch_port_ops = { - .ndo_open = dpaa2_switch_port_open, - .ndo_stop = dpaa2_switch_port_stop, - - .ndo_set_mac_address = eth_mac_addr, - .ndo_get_stats64 = dpaa2_switch_port_get_stats, - .ndo_change_mtu = dpaa2_switch_port_change_mtu, - .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, - .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, - .ndo_fdb_add = dpaa2_switch_port_fdb_add, - .ndo_fdb_del = dpaa2_switch_port_fdb_del, - .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, - - .ndo_start_xmit = dpaa2_switch_port_dropframe, - .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, - .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, -}; - -static bool dpaa2_switch_port_dev_check(const struct net_device *netdev, - struct notifier_block *nb) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - if (netdev->netdev_ops == &dpaa2_switch_port_ops && - (!nb || &port_priv->ethsw_data->port_nb == nb || - &port_priv->ethsw_data->port_switchdev_nb == nb || - &port_priv->ethsw_data->port_switchdevb_nb == nb)) - return true; - - return false; -} - -static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) -{ - int i; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); - dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); - } -} - -static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) -{ - struct device *dev = (struct device *)arg; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - - /* Mask the events and the if_id reserved bits to be cleared on read */ - u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; - int err; - - err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, &status); - if (err) { - dev_err(dev, "Can't get irq status (err %d)\n", err); - - err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); - if (err) - dev_err(dev, "Can't clear irq status (err %d)\n", err); - goto out; - } - - if (status & DPSW_IRQ_EVENT_LINK_CHANGED) - dpaa2_switch_links_state_update(ethsw); - -out: - return IRQ_HANDLED; -} - -static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; - struct fsl_mc_device_irq *irq; - int err; - - err = fsl_mc_allocate_irqs(sw_dev); - if (err) { - dev_err(dev, "MC irqs allocation failed\n"); - return err; - } - - if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { - err = -EINVAL; - goto free_irq; - } - - err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0); - if (err) { - dev_err(dev, "dpsw_set_irq_enable err %d\n", err); - goto free_irq; - } - - irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; - - err = devm_request_threaded_irq(dev, irq->msi_desc->irq, - NULL, - dpaa2_switch_irq0_handler_thread, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(dev), dev); - if (err) { - dev_err(dev, "devm_request_threaded_irq(): %d\n", err); - goto free_irq; - } - - err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, mask); - if (err) { - dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); - goto free_devm_irq; - } - - err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 1); - if (err) { - dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); - goto 
free_devm_irq; - } - - return 0; - -free_devm_irq: - devm_free_irq(dev, irq->msi_desc->irq, dev); -free_irq: - fsl_mc_free_irqs(sw_dev); - return err; -} - -static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - int err; - - err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0); - if (err) - dev_err(dev, "dpsw_set_irq_enable err %d\n", err); - - fsl_mc_free_irqs(sw_dev); -} - -static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, - u8 state) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - return dpaa2_switch_port_set_stp_state(port_priv, state); -} - -static int -dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev, - struct switchdev_brport_flags flags) -{ - if (flags.mask & ~(BR_LEARNING | BR_FLOOD)) - return -EINVAL; - - return 0; -} - -static int -dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev, - struct switchdev_brport_flags flags) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err = 0; - - if (flags.mask & BR_LEARNING) { - /* Learning is enabled per switch */ - err = dpaa2_switch_set_learning(port_priv->ethsw_data, - !!(flags.val & BR_LEARNING)); - if (err) - return err; - } - - if (flags.mask & BR_FLOOD) { - err = dpaa2_switch_port_set_flood(port_priv, - !!(flags.val & BR_FLOOD)); - if (err) - return err; - } - - return 0; -} - -static int dpaa2_switch_port_attr_set(struct net_device *netdev, - const struct switchdev_attr *attr) -{ - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = dpaa2_switch_port_attr_stp_state_set(netdev, - attr->u.stp_state); - break; - case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: - err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = dpaa2_switch_port_attr_br_flags_set(netdev, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - /* VLANs are supported by default */ - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int dpaa2_switch_port_vlans_add(struct net_device *netdev, - const struct switchdev_obj_port_vlan *vlan) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpsw_attr *attr = ðsw->sw_attr; - int err = 0; - - /* Make sure that the VLAN is not already configured - * on the switch port - */ - if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) - return -EEXIST; - - /* Check if there is space for a new VLAN */ - err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, - ðsw->sw_attr); - if (err) { - netdev_err(netdev, "dpsw_get_attributes err %d\n", err); - return err; - } - if (attr->max_vlans - attr->num_vlans < 1) - return -ENOSPC; - - /* Check if there is space for a new VLAN */ - err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, - ðsw->sw_attr); - if (err) { - netdev_err(netdev, "dpsw_get_attributes err %d\n", err); - return err; - } - if (attr->max_vlans - attr->num_vlans < 1) - return -ENOSPC; - - if (!port_priv->ethsw_data->vlans[vlan->vid]) { - /* this is a new VLAN */ - err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vlan->vid); - if (err) - return err; - - port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; - } - - return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); -} - -static int 
dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, - const unsigned char *addr) -{ - struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc; - struct netdev_hw_addr *ha; - - netif_addr_lock_bh(netdev); - list_for_each_entry(ha, &list->list, list) { - if (ether_addr_equal(ha->addr, addr)) { - netif_addr_unlock_bh(netdev); - return 1; - } - } - netif_addr_unlock_bh(netdev); - return 0; -} - -static int dpaa2_switch_port_mdb_add(struct net_device *netdev, - const struct switchdev_obj_port_mdb *mdb) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - /* Check if address is already set on this port */ - if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) - return -EEXIST; - - err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); - if (err) - return err; - - err = dev_mc_add(netdev, mdb->addr); - if (err) { - netdev_err(netdev, "dev_mc_add err %d\n", err); - dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); - } - - return err; -} - -static int dpaa2_switch_port_obj_add(struct net_device *netdev, - const struct switchdev_obj *obj) -{ - int err; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = dpaa2_switch_port_vlans_add(netdev, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - err = dpaa2_switch_port_mdb_add(netdev, - SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *netdev = port_priv->netdev; - struct dpsw_vlan_if_cfg vcfg; - int i, err; - - if (!port_priv->vlans[vid]) - return -ENOENT; - - if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { - err = dpaa2_switch_port_set_pvid(port_priv, 0); - if (err) - return err; - } - - vcfg.num_ifs = 1; - vcfg.if_id[0] = port_priv->idx; - if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { - err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, - ethsw->dpsw_handle, - vid, &vcfg); - if (err) { - netdev_err(netdev, - "dpsw_vlan_remove_if_untagged err %d\n", - err); - } - port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; - } - - if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { - err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - vid, &vcfg); - if (err) { - netdev_err(netdev, - "dpsw_vlan_remove_if err %d\n", err); - return err; - } - port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; - - /* Delete VLAN from switch if it is no longer configured on - * any port - */ - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) - return 0; /* Found a port member in VID */ - - ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; - - err = dpaa2_switch_dellink(ethsw, vid); - if (err) - return err; - } - - return 0; -} - -static int dpaa2_switch_port_vlans_del(struct net_device *netdev, - const struct switchdev_obj_port_vlan *vlan) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - if (netif_is_bridge_master(vlan->obj.orig_dev)) - return -EOPNOTSUPP; - - return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); -} - -static int dpaa2_switch_port_mdb_del(struct net_device *netdev, - const struct switchdev_obj_port_mdb *mdb) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) - return -ENOENT; - - err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); - if (err) - return err; - - err = dev_mc_del(netdev, 
mdb->addr); - if (err) { - netdev_err(netdev, "dev_mc_del err %d\n", err); - return err; - } - - return err; -} - -static int dpaa2_switch_port_obj_del(struct net_device *netdev, - const struct switchdev_obj *obj) -{ - int err; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - return err; -} - -static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, - struct switchdev_notifier_port_attr_info - *port_attr_info) -{ - int err; - - err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr); - - port_attr_info->handled = true; - return notifier_from_errno(err); -} - -/* For the moment, only flood setting needs to be updated */ -static int dpaa2_switch_port_bridge_join(struct net_device *netdev, - struct net_device *upper_dev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct ethsw_port_priv *other_port_priv; - struct net_device *other_dev; - struct list_head *iter; - int i, err; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (ethsw->ports[i]->bridge_dev && - (ethsw->ports[i]->bridge_dev != upper_dev)) { - netdev_err(netdev, - "Only one bridge supported per DPSW object!\n"); - return -EINVAL; - } - - netdev_for_each_lower_dev(upper_dev, other_dev, iter) { - if (!dpaa2_switch_port_dev_check(other_dev, NULL)) - continue; - - other_port_priv = netdev_priv(other_dev); - if (other_port_priv->ethsw_data != port_priv->ethsw_data) { - netdev_err(netdev, - "Interface from a different DPSW is in the bridge already!\n"); - return -EINVAL; - } - } - - /* Enable flooding */ - err = dpaa2_switch_port_set_flood(port_priv, 1); - if (!err) - port_priv->bridge_dev = upper_dev; - - return err; -} - -static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - /* Disable flooding */ - err = dpaa2_switch_port_set_flood(port_priv, 0); - if (!err) - port_priv->bridge_dev = NULL; - - return err; -} - -static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct netdev_notifier_changeupper_info *info = ptr; - struct net_device *upper_dev; - int err = 0; - - if (!dpaa2_switch_port_dev_check(netdev, nb)) - return NOTIFY_DONE; - - /* Handle just upper dev link/unlink for the moment */ - if (event == NETDEV_CHANGEUPPER) { - upper_dev = info->upper_dev; - if (netif_is_bridge_master(upper_dev)) { - if (info->linking) - err = dpaa2_switch_port_bridge_join(netdev, upper_dev); - else - err = dpaa2_switch_port_bridge_leave(netdev); - } - } - - return notifier_from_errno(err); -} - -struct ethsw_switchdev_event_work { - struct work_struct work; - struct switchdev_notifier_fdb_info fdb_info; - struct net_device *dev; - unsigned long event; -}; - -static void dpaa2_switch_event_work(struct work_struct *work) -{ - struct ethsw_switchdev_event_work *switchdev_work = - container_of(work, struct ethsw_switchdev_event_work, work); - struct net_device *dev = switchdev_work->dev; - struct switchdev_notifier_fdb_info *fdb_info; - int err; - - rtnl_lock(); - fdb_info = &switchdev_work->fdb_info; - - switch (switchdev_work->event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: - if 
(!fdb_info->added_by_user) - break; - if (is_unicast_ether_addr(fdb_info->addr)) - err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), - fdb_info->addr); - else - err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), - fdb_info->addr); - if (err) - break; - fdb_info->offloaded = true; - call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, - &fdb_info->info, NULL); - break; - case SWITCHDEV_FDB_DEL_TO_DEVICE: - if (!fdb_info->added_by_user) - break; - if (is_unicast_ether_addr(fdb_info->addr)) - dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); - else - dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); - break; - } - - rtnl_unlock(); - kfree(switchdev_work->fdb_info.addr); - kfree(switchdev_work); - dev_put(dev); -} - -/* Called under rcu_read_lock() */ -static int dpaa2_switch_port_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - struct ethsw_port_priv *port_priv = netdev_priv(dev); - struct ethsw_switchdev_event_work *switchdev_work; - struct switchdev_notifier_fdb_info *fdb_info = ptr; - struct ethsw_core *ethsw = port_priv->ethsw_data; - - if (!dpaa2_switch_port_dev_check(dev, nb)) - return NOTIFY_DONE; - - if (event == SWITCHDEV_PORT_ATTR_SET) - return dpaa2_switch_port_attr_set_event(dev, ptr); - - switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); - if (!switchdev_work) - return NOTIFY_BAD; - - INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); - switchdev_work->dev = dev; - switchdev_work->event = event; - - switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: - case SWITCHDEV_FDB_DEL_TO_DEVICE: - memcpy(&switchdev_work->fdb_info, ptr, - sizeof(switchdev_work->fdb_info)); - switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); - if (!switchdev_work->fdb_info.addr) - goto err_addr_alloc; - - ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, - fdb_info->addr); - - /* Take a reference on the device to avoid being freed. 
*/ - dev_hold(dev); - break; - default: - kfree(switchdev_work); - return NOTIFY_DONE; - } - - queue_work(ethsw->workqueue, &switchdev_work->work); - - return NOTIFY_DONE; - -err_addr_alloc: - kfree(switchdev_work); - return NOTIFY_BAD; -} - -static int dpaa2_switch_port_obj_event(unsigned long event, - struct net_device *netdev, - struct switchdev_notifier_port_obj_info *port_obj_info) -{ - int err = -EOPNOTSUPP; - - switch (event) { - case SWITCHDEV_PORT_OBJ_ADD: - err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); - break; - case SWITCHDEV_PORT_OBJ_DEL: - err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); - break; - } - - port_obj_info->handled = true; - return notifier_from_errno(err); -} - -static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - - if (!dpaa2_switch_port_dev_check(dev, nb)) - return NOTIFY_DONE; - - switch (event) { - case SWITCHDEV_PORT_OBJ_ADD: - case SWITCHDEV_PORT_OBJ_DEL: - return dpaa2_switch_port_obj_event(event, dev, ptr); - case SWITCHDEV_PORT_ATTR_SET: - return dpaa2_switch_port_attr_set_event(dev, ptr); - } - - return NOTIFY_DONE; -} - -static int dpaa2_switch_register_notifier(struct device *dev) -{ - struct ethsw_core *ethsw = dev_get_drvdata(dev); - int err; - - ethsw->port_nb.notifier_call = dpaa2_switch_port_netdevice_event; - err = register_netdevice_notifier(ðsw->port_nb); - if (err) { - dev_err(dev, "Failed to register netdev notifier\n"); - return err; - } - - ethsw->port_switchdev_nb.notifier_call = dpaa2_switch_port_event; - err = register_switchdev_notifier(ðsw->port_switchdev_nb); - if (err) { - dev_err(dev, "Failed to register switchdev notifier\n"); - goto err_switchdev_nb; - } - - ethsw->port_switchdevb_nb.notifier_call = dpaa2_switch_port_blocking_event; - err = register_switchdev_blocking_notifier(ðsw->port_switchdevb_nb); - if (err) { - dev_err(dev, "Failed to register switchdev blocking notifier\n"); - goto err_switchdev_blocking_nb; - } - - return 0; - -err_switchdev_blocking_nb: - unregister_switchdev_notifier(ðsw->port_switchdev_nb); -err_switchdev_nb: - unregister_netdevice_notifier(ðsw->port_nb); - return err; -} - -static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) -{ - ethsw->features = 0; - - if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) - ethsw->features |= ETHSW_FEATURE_MAC_ADDR; -} - -static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - struct dpsw_stp_cfg stp_cfg; - int err; - u16 i; - - ethsw->dev_id = sw_dev->obj_desc.id; - - err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle); - if (err) { - dev_err(dev, "dpsw_open err %d\n", err); - return err; - } - - err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, - ðsw->sw_attr); - if (err) { - dev_err(dev, "dpsw_get_attributes err %d\n", err); - goto err_close; - } - - err = dpsw_get_api_version(ethsw->mc_io, 0, - ðsw->major, - ðsw->minor); - if (err) { - dev_err(dev, "dpsw_get_api_version err %d\n", err); - goto err_close; - } - - /* Minimum supported DPSW version check */ - if (ethsw->major < DPSW_MIN_VER_MAJOR || - (ethsw->major == DPSW_MIN_VER_MAJOR && - ethsw->minor < DPSW_MIN_VER_MINOR)) { - dev_err(dev, "DPSW version %d:%d not supported. 
Use %d.%d or greater.\n", - ethsw->major, - ethsw->minor, - DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR); - err = -ENOTSUPP; - goto err_close; - } - - dpaa2_switch_detect_features(ethsw); - - err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) { - dev_err(dev, "dpsw_reset err %d\n", err); - goto err_close; - } - - err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0, - DPSW_FDB_LEARNING_MODE_HW); - if (err) { - dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err); - goto err_close; - } - - stp_cfg.vlan_id = DEFAULT_VLAN_ID; - stp_cfg.state = DPSW_STP_STATE_FORWARDING; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, - &stp_cfg); - if (err) { - dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", - err, i); - goto err_close; - } - - err = dpsw_if_set_broadcast(ethsw->mc_io, 0, - ethsw->dpsw_handle, i, 1); - if (err) { - dev_err(dev, - "dpsw_if_set_broadcast err %d for port %d\n", - err, i); - goto err_close; - } - } - - ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", - WQ_MEM_RECLAIM, "ethsw", - ethsw->sw_attr.id); - if (!ethsw->workqueue) { - err = -ENOMEM; - goto err_close; - } - - err = dpaa2_switch_register_notifier(dev); - if (err) - goto err_destroy_ordered_workqueue; - - return 0; - -err_destroy_ordered_workqueue: - destroy_workqueue(ethsw->workqueue); - -err_close: - dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); - return err; -} - -static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) -{ - struct net_device *netdev = port_priv->netdev; - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpsw_vlan_if_cfg vcfg; - int err; - - /* Switch starts with all ports configured to VLAN 1. Need to - * remove this setting to allow configuration at bridge join - */ - vcfg.num_ifs = 1; - vcfg.if_id[0] = port_priv->idx; - - err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, - DEFAULT_VLAN_ID, &vcfg); - if (err) { - netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n", - err); - return err; - } - - err = dpaa2_switch_port_set_pvid(port_priv, 0); - if (err) - return err; - - err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - DEFAULT_VLAN_ID, &vcfg); - if (err) - netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err); - - return err; -} - -static void dpaa2_switch_unregister_notifier(struct device *dev) -{ - struct ethsw_core *ethsw = dev_get_drvdata(dev); - struct notifier_block *nb; - int err; - - nb = ðsw->port_switchdevb_nb; - err = unregister_switchdev_blocking_notifier(nb); - if (err) - dev_err(dev, - "Failed to unregister switchdev blocking notifier (%d)\n", - err); - - err = unregister_switchdev_notifier(ðsw->port_switchdev_nb); - if (err) - dev_err(dev, - "Failed to unregister switchdev notifier (%d)\n", err); - - err = unregister_netdevice_notifier(ðsw->port_nb); - if (err) - dev_err(dev, - "Failed to unregister netdev notifier (%d)\n", err); -} - -static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - int err; - - dpaa2_switch_unregister_notifier(dev); - - err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) - dev_warn(dev, "dpsw_close err %d\n", err); -} - -static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) -{ - struct ethsw_port_priv *port_priv; - struct ethsw_core *ethsw; - struct device *dev; - int i; - - dev = &sw_dev->dev; - ethsw = dev_get_drvdata(dev); - - 
dpaa2_switch_teardown_irqs(sw_dev); - - dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - port_priv = ethsw->ports[i]; - unregister_netdev(port_priv->netdev); - free_netdev(port_priv->netdev); - } - kfree(ethsw->ports); - - dpaa2_switch_takedown(sw_dev); - - destroy_workqueue(ethsw->workqueue); - - fsl_mc_portal_free(ethsw->mc_io); - - kfree(ethsw); - - dev_set_drvdata(dev, NULL); - - return 0; -} - -static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, - u16 port_idx) -{ - struct ethsw_port_priv *port_priv; - struct device *dev = ethsw->dev; - struct net_device *port_netdev; - int err; - - port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); - if (!port_netdev) { - dev_err(dev, "alloc_etherdev error\n"); - return -ENOMEM; - } - - port_priv = netdev_priv(port_netdev); - port_priv->netdev = port_netdev; - port_priv->ethsw_data = ethsw; - - port_priv->idx = port_idx; - port_priv->stp_state = BR_STATE_FORWARDING; - - /* Flooding is implicitly enabled */ - port_priv->flood = true; - - SET_NETDEV_DEV(port_netdev, dev); - port_netdev->netdev_ops = &dpaa2_switch_port_ops; - port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; - - /* Set MTU limits */ - port_netdev->min_mtu = ETH_MIN_MTU; - port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; - - err = dpaa2_switch_port_init(port_priv, port_idx); - if (err) - goto err_port_probe; - - err = dpaa2_switch_port_set_mac_addr(port_priv); - if (err) - goto err_port_probe; - - err = register_netdev(port_netdev); - if (err < 0) { - dev_err(dev, "register_netdev error %d\n", err); - goto err_port_probe; - } - - ethsw->ports[port_idx] = port_priv; - - return 0; - -err_port_probe: - free_netdev(port_netdev); - - return err; -} - -static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw; - int i, err; - - /* Allocate switch core*/ - ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); - - if (!ethsw) - return -ENOMEM; - - ethsw->dev = dev; - dev_set_drvdata(dev, ethsw); - - err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, - ðsw->mc_io); - if (err) { - if (err == -ENXIO) - err = -EPROBE_DEFER; - else - dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); - goto err_free_drvdata; - } - - err = dpaa2_switch_init(sw_dev); - if (err) - goto err_free_cmdport; - - /* DEFAULT_VLAN_ID is implicitly configured on the switch */ - ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER; - - /* Learning is implicitly enabled */ - ethsw->learning = true; - - ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), - GFP_KERNEL); - if (!(ethsw->ports)) { - err = -ENOMEM; - goto err_takedown; - } - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - err = dpaa2_switch_probe_port(ethsw, i); - if (err) - goto err_free_ports; - } - - err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) { - dev_err(ethsw->dev, "dpsw_enable err %d\n", err); - goto err_free_ports; - } - - /* Make sure the switch ports are disabled at probe time */ - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); - - /* Setup IRQs */ - err = dpaa2_switch_setup_irqs(sw_dev); - if (err) - goto err_stop; - - dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs); - return 0; - -err_stop: - dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); - -err_free_ports: - /* Cleanup registered ports only */ - for (i--; i >= 0; i--) { - unregister_netdev(ethsw->ports[i]->netdev); - 
free_netdev(ethsw->ports[i]->netdev); - } - kfree(ethsw->ports); - -err_takedown: - dpaa2_switch_takedown(sw_dev); - -err_free_cmdport: - fsl_mc_portal_free(ethsw->mc_io); - -err_free_drvdata: - kfree(ethsw); - dev_set_drvdata(dev, NULL); - - return err; -} - -static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpsw", - }, - { .vendor = 0x0 } -}; -MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); - -static struct fsl_mc_driver dpaa2_switch_drv = { - .driver = { - .name = KBUILD_MODNAME, - .owner = THIS_MODULE, - }, - .probe = dpaa2_switch_probe, - .remove = dpaa2_switch_remove, - .match_id_table = dpaa2_switch_match_id_table -}; - -module_fsl_mc_driver(dpaa2_switch_drv); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h deleted file mode 100644 index 5f9211ccb1ef..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * DPAA2 Ethernet Switch declarations - * - * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2017-2018 NXP - * - */ - -#ifndef __ETHSW_H -#define __ETHSW_H - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/rtnetlink.h> -#include <linux/if_vlan.h> -#include <uapi/linux/if_bridge.h> -#include <net/switchdev.h> -#include <linux/if_bridge.h> - -#include "dpsw.h" - -/* Number of IRQs supported */ -#define DPSW_IRQ_NUM 2 - -/* Port is member of VLAN */ -#define ETHSW_VLAN_MEMBER 1 -/* VLAN to be treated as untagged on egress */ -#define ETHSW_VLAN_UNTAGGED 2 -/* Untagged frames will be assigned to this VLAN */ -#define ETHSW_VLAN_PVID 4 -/* VLAN configured on the switch */ -#define ETHSW_VLAN_GLOBAL 8 - -/* Maximum Frame Length supported by HW (currently 10k) */ -#define DPAA2_MFL (10 * 1024) -#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN) -#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN) - -#define ETHSW_FEATURE_MAC_ADDR BIT(0) - -extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops; - -struct ethsw_core; - -/* Per port private data */ -struct ethsw_port_priv { - struct net_device *netdev; - u16 idx; - struct ethsw_core *ethsw_data; - u8 link_state; - u8 stp_state; - bool flood; - - u8 vlans[VLAN_VID_MASK + 1]; - u16 pvid; - struct net_device *bridge_dev; -}; - -/* Switch data */ -struct ethsw_core { - struct device *dev; - struct fsl_mc_io *mc_io; - u16 dpsw_handle; - struct dpsw_attr sw_attr; - u16 major, minor; - unsigned long features; - int dev_id; - struct ethsw_port_priv **ports; - - u8 vlans[VLAN_VID_MASK + 1]; - bool learning; - - struct notifier_block port_nb; - struct notifier_block port_switchdev_nb; - struct notifier_block port_switchdevb_nb; - struct workqueue_struct *workqueue; -}; - -#endif /* __ETHSW_H */ diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 7c60b0cd8bf7..dcbba9621b21 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -407,14 +407,10 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) int cvm_oct_common_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); - const u8 *mac = NULL; + int ret; - if (priv->of_node) - mac = of_get_mac_address(priv->of_node); - - if (!IS_ERR_OR_NULL(mac)) - ether_addr_copy(dev->dev_addr, mac); - else + ret = 
of_get_mac_address(priv->of_node, dev->dev_addr); + if (ret) eth_hw_addr_random(dev); /* diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index e7bc1988124a..4b9fdf99981b 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -334,7 +334,6 @@ int wfx_probe(struct wfx_dev *wdev) { int i; int err; - const void *macaddr; struct gpio_desc *gpio_saved; // During first part of boot, gpio_wakeup cannot yet been used. So @@ -423,9 +422,9 @@ int wfx_probe(struct wfx_dev *wdev) for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) { eth_zero_addr(wdev->addresses[i].addr); - macaddr = of_get_mac_address(wdev->dev->of_node); - if (!IS_ERR_OR_NULL(macaddr)) { - ether_addr_copy(wdev->addresses[i].addr, macaddr); + err = of_get_mac_address(wdev->dev->of_node, + wdev->addresses[i].addr); + if (!err) { wdev->addresses[i].addr[ETH_ALEN - 1] += i; } else { ether_addr_copy(wdev->addresses[i].addr, |
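Editor's note on the two staging hunks above (octeon and wfx): they are part of the tree-wide of_get_mac_address() conversion. Instead of returning a pointer that callers had to test with IS_ERR_OR_NULL() before copying, the helper now writes the address into a caller-supplied buffer and returns 0 or a negative errno. A minimal sketch of the resulting driver pattern follows; it is an illustration only, and the helper name example_set_mac() is hypothetical, not taken from this patch.

/* Sketch of the new of_get_mac_address() calling convention used in the
 * hunks above: fill a caller-provided buffer, return 0 or -errno, and
 * fall back to a random address when the devicetree has none.
 * example_set_mac() is a hypothetical helper, not part of this patch.
 */
#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	int err;

	/* Copies the DT "mac-address"/"local-mac-address" property (or an
	 * nvmem-provided address) straight into ndev->dev_addr on success.
	 */
	err = of_get_mac_address(np, ndev->dev_addr);
	if (err)
		/* No usable address in the devicetree: pick a random one. */
		eth_hw_addr_random(ndev);
}

Depending on the kernel version, of_get_mac_address() can also return -EPROBE_DEFER when the address lives in a not-yet-available nvmem cell, so probe paths that care about that case may want to propagate the error rather than silently falling back.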