author    | Takashi Iwai <tiwai@suse.de> | 2024-12-20 14:09:45 +0100
committer | Takashi Iwai <tiwai@suse.de> | 2024-12-20 14:09:45 +0100
commit    | 8cbd01ba9c38eb16f3a572300da486ac544519b7 (patch)
tree      | e9a800bcb96bf8e937ddf0d420514dccbc6c1a75 /drivers
parent    | 66a0a2b0473c39ae85c44628d14e4366fdc0aa0d (diff)
parent    | 32c9c06adb5b157ef259233775a063a43746d699 (diff)
Merge tag 'asoc-fix-v6.13-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v6.13
A mix of quirks and small fixes, nothing too major anywhere.
Diffstat (limited to 'drivers')
116 files changed, 1043 insertions, 592 deletions
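For reference, the diffstat and patch shown below can be reproduced from a local clone by diffing the merge commit against its first parent from the header above (a minimal sketch; it assumes both commits are present in the local repository):

    # diffstat of the merge relative to its first parent, limited to drivers/
    git diff --stat 66a0a2b0473c39ae85c44628d14e4366fdc0aa0d 8cbd01ba9c38eb16f3a572300da486ac544519b7 -- drivers/
    # full patch text for the same range
    git diff 66a0a2b0473c39ae85c44628d14e4366fdc0aa0d 8cbd01ba9c38eb16f3a572300da486ac544519b7 -- drivers/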
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 95f78383bbdb..bff2d099f469 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device, /* Now we can delete the handler object */ - acpi_os_release_mutex(handler_obj->address_space. - context_mutex); acpi_ut_remove_reference(handler_obj); goto unlock_and_exit; } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 5429ec9ef06f..a5d47819b3a4 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (cmd_rc) *cmd_rc = -EINVAL; - if (cmd == ND_CMD_CALL) + if (cmd == ND_CMD_CALL) { + if (!buf || buf_len < sizeof(*call_pkg)) + return -EINVAL; + call_pkg = buf; + } + func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); if (func < 0) return func; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 7fe842dae1ec..821867de43be 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win, switch (addr->resource_type) { case ACPI_MEMORY_RANGE: acpi_dev_memresource_flags(res, len, wp); + + if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) + res->flags |= IORESOURCE_PREFETCH; break; case ACPI_IO_RANGE: acpi_dev_ioresource_flags(res, len, iodec, @@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win, if (addr->producer_consumer == ACPI_PRODUCER) res->flags |= IORESOURCE_WINDOW; - if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) - res->flags |= IORESOURCE_PREFETCH; - return !(res->flags & IORESOURCE_DISABLED); } diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index b1b40e9551de..c8c817c51230 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr) phy_nodes[phy] = phy_data.np; cphy_base[phy] = of_iomap(phy_nodes[phy], 0); if (cphy_base[phy] == NULL) { + of_node_put(phy_data.np); return 0; } phy_count += 1; diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c index 8a3f7c3fcfec..7fd9d5ddce02 100644 --- a/drivers/bluetooth/btmtk.c +++ b/drivers/bluetooth/btmtk.c @@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtk_data *data = hci_get_priv(hdev); int err; + bool complete = false; if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) { kfree_skb(skb); @@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) fallthrough; case HCI_DEVCOREDUMP_ACTIVE: default: + /* Mediatek coredump data would be more than MTK_COREDUMP_NUM */ + if (data->cd_info.cnt >= MTK_COREDUMP_NUM && + skb->len > MTK_COREDUMP_END_LEN) + if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN], + MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) + complete = true; + err = hci_devcd_append(hdev, skb); if (err < 0) break; data->cd_info.cnt++; - /* Mediatek coredump data would be more than MTK_COREDUMP_NUM */ - if (data->cd_info.cnt > MTK_COREDUMP_NUM && - skb->len > MTK_COREDUMP_END_LEN) - if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN], - MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) { - bt_dev_info(hdev, "Mediatek coredump end"); - hci_devcd_complete(hdev); - } + if (complete) { + bt_dev_info(hdev, "Mediatek coredump end"); + hci_devcd_complete(hdev); + } break; } diff 
--git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c index e52c5460e927..495c0d607c7d 100644 --- a/drivers/clk/clk-en7523.c +++ b/drivers/clk/clk-en7523.c @@ -87,6 +87,7 @@ static const u32 slic_base[] = { 100000000, 3125000 }; static const u32 npu_base[] = { 333000000, 400000000, 500000000 }; /* EN7581 */ static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 }; +static const u32 bus7581_base[] = { 600000000, 540000000 }; static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 }; static const u32 crypto_base[] = { 540000000, 480000000 }; @@ -222,8 +223,8 @@ static const struct en_clk_desc en7581_base_clks[] = { .base_reg = REG_BUS_CLK_DIV_SEL, .base_bits = 1, .base_shift = 8, - .base_values = bus_base, - .n_base_values = ARRAY_SIZE(bus_base), + .base_values = bus7581_base, + .n_base_values = ARRAY_SIZE(bus7581_base), .div_bits = 3, .div_shift = 0, @@ -503,6 +504,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat u32 rate; int i; + clk_data->num = EN7523_NUM_CLOCKS; + for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) { const struct en_clk_desc *desc = &en7523_base_clks[i]; u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg; @@ -524,8 +527,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat hw = en7523_register_pcie_clk(dev, np_base); clk_data->hws[EN7523_CLK_PCIE] = hw; - - clk_data->num = EN7523_NUM_CLOCKS; } static int en7523_clk_hw_init(struct platform_device *pdev, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index bdc6e5b90da5..9b45fa005030 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2530,7 +2530,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core, rate = clk_core_req_round_rate_nolock(core, req_rate); /* bail early if nothing to do */ - if (rate == clk_core_get_rate_recalc(core)) + if (rate == clk_core_get_rate_nolock(core)) return 0; /* fail on a direct rate set of a protected provider */ diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index febb5d7348ff..be2e3a5f8336 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -106,7 +106,7 @@ config COMMON_CLK_AXG_AUDIO select COMMON_CLK_MESON_SCLK_DIV select COMMON_CLK_MESON_CLKC_UTILS select REGMAP_MMIO - depends on RESET_MESON_AUX + select RESET_CONTROLLER help Support for the audio clock controller on AmLogic A113D devices, aka axg, Say Y if you want audio subsystem to work. 
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index 7a3460804424..9df627b142f8 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -15,8 +15,6 @@ #include <linux/reset-controller.h> #include <linux/slab.h> -#include <soc/amlogic/reset-meson-aux.h> - #include "meson-clkc-utils.h" #include "axg-audio.h" #include "clk-regmap.h" @@ -1680,6 +1678,84 @@ static struct clk_regmap *const sm1_clk_regmaps[] = { &sm1_earcrx_dmac_clk, }; +struct axg_audio_reset_data { + struct reset_controller_dev rstc; + struct regmap *map; + unsigned int offset; +}; + +static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst, + unsigned long id, + unsigned int *reg, + unsigned int *bit) +{ + unsigned int stride = regmap_get_reg_stride(rst->map); + + *reg = (id / (stride * BITS_PER_BYTE)) * stride; + *reg += rst->offset; + *bit = id % (stride * BITS_PER_BYTE); +} + +static int axg_audio_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct axg_audio_reset_data *rst = + container_of(rcdev, struct axg_audio_reset_data, rstc); + unsigned int offset, bit; + + axg_audio_reset_reg_and_bit(rst, id, &offset, &bit); + + regmap_update_bits(rst->map, offset, BIT(bit), + assert ? BIT(bit) : 0); + + return 0; +} + +static int axg_audio_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct axg_audio_reset_data *rst = + container_of(rcdev, struct axg_audio_reset_data, rstc); + unsigned int val, offset, bit; + + axg_audio_reset_reg_and_bit(rst, id, &offset, &bit); + + regmap_read(rst->map, offset, &val); + + return !!(val & BIT(bit)); +} + +static int axg_audio_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return axg_audio_reset_update(rcdev, id, true); +} + +static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return axg_audio_reset_update(rcdev, id, false); +} + +static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = axg_audio_reset_assert(rcdev, id); + if (ret) + return ret; + + return axg_audio_reset_deassert(rcdev, id); +} + +static const struct reset_control_ops axg_audio_rstc_ops = { + .assert = axg_audio_reset_assert, + .deassert = axg_audio_reset_deassert, + .reset = axg_audio_reset_toggle, + .status = axg_audio_reset_status, +}; + static struct regmap_config axg_audio_regmap_cfg = { .reg_bits = 32, .val_bits = 32, @@ -1690,14 +1766,16 @@ struct audioclk_data { struct clk_regmap *const *regmap_clks; unsigned int regmap_clk_num; struct meson_clk_hw_data hw_clks; + unsigned int reset_offset; + unsigned int reset_num; unsigned int max_register; - const char *rst_drvname; }; static int axg_audio_clkc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct audioclk_data *data; + struct axg_audio_reset_data *rst; struct regmap *map; void __iomem *regs; struct clk_hw *hw; @@ -1756,11 +1834,22 @@ static int axg_audio_clkc_probe(struct platform_device *pdev) if (ret) return ret; - /* Register auxiliary reset driver when applicable */ - if (data->rst_drvname) - ret = devm_meson_rst_aux_register(dev, map, data->rst_drvname); + /* Stop here if there is no reset */ + if (!data->reset_num) + return 0; + + rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL); + if (!rst) + return -ENOMEM; + + rst->map = map; + rst->offset = data->reset_offset; + rst->rstc.nr_resets = data->reset_num; + rst->rstc.ops = &axg_audio_rstc_ops; + rst->rstc.of_node = 
dev->of_node; + rst->rstc.owner = THIS_MODULE; - return ret; + return devm_reset_controller_register(dev, &rst->rstc); } static const struct audioclk_data axg_audioclk_data = { @@ -1780,8 +1869,9 @@ static const struct audioclk_data g12a_audioclk_data = { .hws = g12a_audio_hw_clks, .num = ARRAY_SIZE(g12a_audio_hw_clks), }, + .reset_offset = AUDIO_SW_RESET, + .reset_num = 26, .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL, - .rst_drvname = "rst-g12a", }; static const struct audioclk_data sm1_audioclk_data = { @@ -1791,8 +1881,9 @@ static const struct audioclk_data sm1_audioclk_data = { .hws = sm1_audio_hw_clks, .num = ARRAY_SIZE(sm1_audio_hw_clks), }, + .reset_offset = AUDIO_SM1_SW_RESET0, + .reset_num = 39, .max_register = AUDIO_EARCRX_DMAC_CLK_CTRL, - .rst_drvname = "rst-sm1", }; static const struct of_device_id clkc_match_table[] = { diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 1b9b7bccdeff..45e130b901eb 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -192,7 +192,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) down_read(&qm->qps_lock); if (qm->sqc) { - memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc)); + memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc)); sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC"); @@ -229,7 +229,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) down_read(&qm->qps_lock); if (qm->cqc) { - memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc)); + memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc)); cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC"); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ddfbdb66b794..5d356b7c4589 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3362,36 +3362,24 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt) static bool umc_ecc_enabled(struct amd64_pvt *pvt) { - u8 umc_en_mask = 0, ecc_en_mask = 0; - u16 nid = pvt->mc_node_id; struct amd64_umc *umc; - u8 ecc_en = 0, i; + bool ecc_en = false; + int i; + /* Check whether at least one UMC is enabled: */ for_each_umc(i) { umc = &pvt->umc[i]; - /* Only check enabled UMCs. */ - if (!(umc->sdp_ctrl & UMC_SDP_INIT)) - continue; - - umc_en_mask |= BIT(i); - - if (umc->umc_cap_hi & UMC_ECC_ENABLED) - ecc_en_mask |= BIT(i); + if (umc->sdp_ctrl & UMC_SDP_INIT && + umc->umc_cap_hi & UMC_ECC_ENABLED) { + ecc_en = true; + break; + } } - /* Check whether at least one UMC is enabled: */ - if (umc_en_mask) - ecc_en = umc_en_mask == ecc_en_mask; - else - edac_dbg(0, "Node %d: No enabled UMCs.\n", nid); - - edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled")); + edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? 
"enabled" : "disabled")); - if (!ecc_en) - return false; - else - return true; + return ecc_en; } static inline void diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index e312d731f4a3..5fe61b9ab5f9 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -76,10 +76,6 @@ config EFI_ZBOOT bool "Enable the generic EFI decompressor" depends on EFI_GENERIC_STUB && !ARM select HAVE_KERNEL_GZIP - select HAVE_KERNEL_LZ4 - select HAVE_KERNEL_LZMA - select HAVE_KERNEL_LZO - select HAVE_KERNEL_XZ select HAVE_KERNEL_ZSTD help Create the bootable image as an EFI application that carries the diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 7a81c0ce4780..4bb7b0584bc9 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -75,8 +75,6 @@ static LIST_HEAD(entry_list); struct esre_attribute { struct attribute attr; ssize_t (*show)(struct esre_entry *entry, char *buf); - ssize_t (*store)(struct esre_entry *entry, - const char *buf, size_t count); }; static struct esre_entry *to_entry(struct kobject *kobj) diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 65ffd0b760b2..48842b5c106b 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD $@ $(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE $(call if_changed,copy_and_pad) -comp-type-$(CONFIG_KERNEL_GZIP) := gzip -comp-type-$(CONFIG_KERNEL_LZ4) := lz4 -comp-type-$(CONFIG_KERNEL_LZMA) := lzma -comp-type-$(CONFIG_KERNEL_LZO) := lzo -comp-type-$(CONFIG_KERNEL_XZ) := xzkern -comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22 - # in GZIP, the appended le32 carrying the uncompressed size is part of the # format, but in other cases, we just append it at the end for convenience, # causing the original tools to complain when checking image integrity. -# So disregard it when calculating the payload size in the zimage header. -zboot-method-y := $(comp-type-y)_with_size -zboot-size-len-y := 4 +comp-type-y := gzip +zboot-method-y := gzip +zboot-size-len-y := 0 -zboot-method-$(CONFIG_KERNEL_GZIP) := gzip -zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 +comp-type-$(CONFIG_KERNEL_ZSTD) := zstd +zboot-method-$(CONFIG_KERNEL_ZSTD) := zstd22_with_size +zboot-size-len-$(CONFIG_KERNEL_ZSTD) := 4 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE $(call if_changed,$(zboot-method-y)) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 56fee58e281e..93ee3aa092f8 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -482,8 +482,9 @@ config GPIO_MT7621 Say yes here to support the Mediatek MT7621 SoC GPIO device. 
config GPIO_MVEBU - def_bool y + bool "Marvell Orion and EBU GPIO support" if COMPILE_TEST depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST + default PLAT_ORION || ARCH_MVEBU select GENERIC_IRQ_CHIP select REGMAP_MMIO diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c index f2e911a3d2ca..ad6a045fd3d2 100644 --- a/drivers/gpio/gpio-graniterapids.c +++ b/drivers/gpio/gpio-graniterapids.c @@ -32,12 +32,14 @@ #define GNR_PINS_PER_REG 32 #define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG) -#define GNR_CFG_BAR 0x00 +#define GNR_CFG_PADBAR 0x00 #define GNR_CFG_LOCK_OFFSET 0x04 -#define GNR_GPI_STATUS_OFFSET 0x20 +#define GNR_GPI_STATUS_OFFSET 0x14 #define GNR_GPI_ENABLE_OFFSET 0x24 -#define GNR_CFG_DW_RX_MASK GENMASK(25, 22) +#define GNR_CFG_DW_HOSTSW_MODE BIT(27) +#define GNR_CFG_DW_RX_MASK GENMASK(23, 22) +#define GNR_CFG_DW_INTSEL_MASK GENMASK(21, 14) #define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2) #define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1) #define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0) @@ -50,6 +52,7 @@ * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state * @gc: GPIO controller interface * @reg_base: base address of the GPIO registers + * @pad_base: base address of the vGPIO pad configuration registers * @ro_bitmap: bitmap of read-only pins * @lock: guard the registers * @pad_backup: backup of the register state for suspend @@ -57,6 +60,7 @@ struct gnr_gpio { struct gpio_chip gc; void __iomem *reg_base; + void __iomem *pad_base; DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS); raw_spinlock_t lock; u32 pad_backup[]; @@ -65,7 +69,7 @@ struct gnr_gpio { static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv, unsigned int gpio) { - return priv->reg_base + gpio * sizeof(u32); + return priv->pad_base + gpio * sizeof(u32); } static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio, @@ -88,6 +92,20 @@ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio, return 0; } +static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio) +{ + struct gnr_gpio *priv = gpiochip_get_data(gc); + u32 dw; + + dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio)); + if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) { + dev_warn(gc->parent, "GPIO %u is not owned by host", gpio); + return -EBUSY; + } + + return 0; +} + static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio) { const struct gnr_gpio *priv = gpiochip_get_data(gc); @@ -139,6 +157,7 @@ static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, in static const struct gpio_chip gnr_gpio_chip = { .owner = THIS_MODULE, + .request = gnr_gpio_request, .get = gnr_gpio_get, .set = gnr_gpio_set, .get_direction = gnr_gpio_get_direction, @@ -166,7 +185,7 @@ static void gnr_gpio_irq_ack(struct irq_data *d) guard(raw_spinlock_irqsave)(&priv->lock); reg = readl(addr); - reg &= ~BIT(bit_idx); + reg |= BIT(bit_idx); writel(reg, addr); } @@ -209,10 +228,18 @@ static void gnr_gpio_irq_unmask(struct irq_data *d) static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t pin = irqd_to_hwirq(d); - u32 mask = GNR_CFG_DW_RX_MASK; + struct gnr_gpio *priv = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 reg; u32 set; + /* Allow interrupts only if Interrupt Select field is non-zero */ + reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq)); + if (!(reg & GNR_CFG_DW_INTSEL_MASK)) { + dev_dbg(gc->parent, 
"GPIO %lu cannot be used as IRQ", hwirq); + return -EPERM; + } + /* Falling edge and level low triggers not supported by the GPIO controller */ switch (type) { case IRQ_TYPE_NONE: @@ -230,10 +257,11 @@ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - return gnr_gpio_configure_line(gc, pin, mask, set); + return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set); } static const struct irq_chip gnr_gpio_irq_chip = { + .name = "gpio-graniterapids", .irq_ack = gnr_gpio_irq_ack, .irq_mask = gnr_gpio_irq_mask, .irq_unmask = gnr_gpio_irq_unmask, @@ -291,6 +319,7 @@ static int gnr_gpio_probe(struct platform_device *pdev) struct gnr_gpio *priv; void __iomem *regs; int irq, ret; + u32 offset; priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL); if (!priv) @@ -302,6 +331,10 @@ static int gnr_gpio_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); + priv->reg_base = regs; + offset = readl(priv->reg_base + GNR_CFG_PADBAR); + priv->pad_base = priv->reg_base + offset; + irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -311,8 +344,6 @@ static int gnr_gpio_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "failed to request interrupt\n"); - priv->reg_base = regs + readl(regs + GNR_CFG_BAR); - gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET, priv->ro_bitmap); @@ -324,7 +355,6 @@ static int gnr_gpio_probe(struct platform_device *pdev) girq = &priv->gc.irq; gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip); - girq->chip->name = dev_name(dev); girq->parent_handler = NULL; girq->num_parents = 0; girq->parents = NULL; diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c index 2c9512589297..0103be977c66 100644 --- a/drivers/gpio/gpio-idio-16.c +++ b/drivers/gpio/gpio-idio-16.c @@ -3,6 +3,9 @@ * GPIO library for the ACCES IDIO-16 family * Copyright (C) 2022 William Breathitt Gray */ + +#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16" + #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> @@ -14,8 +17,6 @@ #include "gpio-idio-16.h" -#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16" - #define IDIO_16_DAT_BASE 0x0 #define IDIO_16_OUT_BASE IDIO_16_DAT_BASE #define IDIO_16_IN_BASE (IDIO_16_DAT_BASE + 1) diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c index 503d2441c58f..817ecb12d550 100644 --- a/drivers/gpio/gpio-ljca.c +++ b/drivers/gpio/gpio-ljca.c @@ -82,9 +82,9 @@ static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, int ret; mutex_lock(&ljca_gpio->trans_lock); + packet->num = 1; packet->item[0].index = gpio_id; packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id]; - packet->num = 1; ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet, struct_size(packet, item, packet->num), NULL, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d891ab779ca7..5df21529b3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket) return -EINVAL; + /* Make sure VRAM is allocated contigiously */ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); - for (i = 0; i < (*bo)->placement.num_placement; i++) - (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; - r = 
ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); - if (r) - return r; + if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM && + !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) { + + amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); + for (i = 0; i < (*bo)->placement.num_placement; i++) + (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; + r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); + if (r) + return r; + } return amdgpu_ttm_alloc_gart(&(*bo)->tbo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 96316111300a..d272d95dd5b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -145,7 +145,7 @@ const char *amdgpu_asic_name[] = { "LAST", }; -#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM, 0) +#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0) /* * Default init level where all blocks are expected to be initialized. This is * the level of initialization expected by default and also after a full reset diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 31fd30dcd593..65bb26215e86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo) for (i = 0; i < abo->placement.num_placement; ++i) { abo->placements[i].fpfn = 0 >> PAGE_SHIFT; abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + if (abo->placements[i].mem_type == TTM_PL_VRAM) + abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8d9bf7a0857f..ddd7f05e4db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -674,12 +674,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; - if (adev->gfx.enable_cleaner_shader && - ring->funcs->emit_cleaner_shader && - job->enforce_isolation) - ring->funcs->emit_cleaner_shader(ring); - - if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) + if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync && + !(job->enforce_isolation && !job->vmid)) return 0; amdgpu_ring_ib_begin(ring); @@ -690,6 +686,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, if (need_pipe_sync) amdgpu_ring_emit_pipeline_sync(ring); + if (adev->gfx.enable_cleaner_shader && + ring->funcs->emit_cleaner_shader && + job->enforce_isolation) + ring->funcs->emit_cleaner_shader(ring); + if (vm_flush_needed) { trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index e2b3dda57030..54459254bd37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin"); +MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin"); #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -574,8 +576,12 @@ static int 
gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev, { int err; - err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, - "amdgpu/%s_mec.bin", chip_name); + if (amdgpu_sriov_vf(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + "amdgpu/%s_sjt_mec.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + "amdgpu/%s_mec.bin", chip_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 079131aeb2f7..3c8ab8698af8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1288,7 +1288,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); + struct amdgpu_ring *ring = amdgpu_job_ring(job); unsigned i; /* No patching necessary for the first instance */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 7b826a136ceb..e5324c5bc6c7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1423,6 +1423,7 @@ err: static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, + bool cache_line_size_missing, struct kfd_gpu_cache_info *pcache_info) { struct amdgpu_device *adev = kdev->adev; @@ -1437,6 +1438,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2; pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 128; i++; } /* Scalar L1 Instruction Cache per SQC */ @@ -1449,6 +1452,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2; pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 128; i++; } /* Scalar L1 Data Cache per SQC */ @@ -1460,6 +1465,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2; pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 64; i++; } /* GL1 Data Cache per SA */ @@ -1472,7 +1479,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; - pcache_info[i].cache_line_size = 0; + if (cache_line_size_missing) + pcache_info[i].cache_line_size = 128; i++; } /* L2 Data Cache per GPU (Total Tex Cache) */ @@ -1484,6 +1492,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 128; i++; } /* L3 Data Cache per GPU */ @@ -1494,7 +1504,7 @@ static int 
kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; - pcache_info[i].cache_line_size = 0; + pcache_info[i].cache_line_size = 64; i++; } return i; @@ -1569,6 +1579,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) { int num_of_cache_types = 0; + bool cache_line_size_missing = false; switch (kdev->adev->asic_type) { case CHIP_KAVERI: @@ -1692,10 +1703,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc case IP_VERSION(11, 5, 0): case IP_VERSION(11, 5, 1): case IP_VERSION(11, 5, 2): + /* Cacheline size not available in IP discovery for gc11. + * kfd_fill_gpu_cache_info_from_gfx_config to hard code it + */ + cache_line_size_missing = true; + fallthrough; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): num_of_cache_types = - kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); + kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, + cache_line_size_missing, + *pcache_info); break; default: *pcache_info = dummy_cache_info; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index c79fe9069e22..16b5daaa272f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -207,6 +207,21 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; + if (!pdd->proc_ctx_cpu_ptr) { + r = amdgpu_amdkfd_alloc_gtt_mem(adev, + AMDGPU_MES_PROC_CTX_SIZE, + &pdd->proc_ctx_bo, + &pdd->proc_ctx_gpu_addr, + &pdd->proc_ctx_cpu_ptr, + false); + if (r) { + dev_err(adev->dev, + "failed to allocate process context bo\n"); + return r; + } + memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); + } + memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); queue_input.process_id = qpd->pqm->process->pasid; queue_input.page_table_base_addr = qpd->page_table_base; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index eacfeb32f35d..4b275937d05e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -306,7 +306,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, spage = migrate_pfn_to_page(migrate->src[i]); if (spage && !is_zone_device_page(spage)) { src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, - DMA_TO_DEVICE); + DMA_BIDIRECTIONAL); r = dma_mapping_error(dev, src[i]); if (r) { dev_err(dev, "%s: fail %d dma_map_page\n", @@ -629,7 +629,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, goto out_oom; } - dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); r = dma_mapping_error(dev, dst[i]); if (r) { dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 87cd52cf4ee9..d0ee173acf82 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1076,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) kfd_free_process_doorbells(pdd->dev->kfd, pdd); - if 
(pdd->dev->kfd->shared_resources.enable_mes) + if (pdd->dev->kfd->shared_resources.enable_mes && + pdd->proc_ctx_cpu_ptr) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, &pdd->proc_ctx_bo); /* @@ -1608,7 +1609,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd = NULL; - int retval = 0; if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE)) return NULL; @@ -1632,21 +1632,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); - if (dev->kfd->shared_resources.enable_mes) { - retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, - AMDGPU_MES_PROC_CTX_SIZE, - &pdd->proc_ctx_bo, - &pdd->proc_ctx_gpu_addr, - &pdd->proc_ctx_cpu_ptr, - false); - if (retval) { - dev_err(dev->adev->dev, - "failed to allocate process context bo\n"); - goto err_free_pdd; - } - memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); - } - p->pdds[p->n_pdds++] = pdd; if (kfd_dbg_is_per_vmid_supported(pdd->dev)) pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( @@ -1658,10 +1643,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, idr_init(&pdd->alloc_idr); return pdd; - -err_free_pdd: - kfree(pdd); - return NULL; } /** diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index c76db22a1000..59b92d66e958 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -212,13 +212,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm, void pqm_uninit(struct process_queue_manager *pqm) { struct process_queue_node *pqn, *next; - struct kfd_process_device *pdd; list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { if (pqn->q) { - pdd = kfd_get_process_device_data(pqn->q->device, pqm->process); - kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); - kfd_queue_release_buffers(pdd, &pqn->q->properties); + struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device, + pqm->process); + if (pdd) { + kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); + kfd_queue_release_buffers(pdd, &pqn->q->properties); + } else { + WARN_ON(!pdd); + } pqm_clean_queue_resource(pqm, pqn); } diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 67a5de573943..d7acdd42d80f 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -164,6 +164,7 @@ enum amd_pp_task { }; enum PP_SMC_POWER_PROFILE { + PP_SMC_POWER_PROFILE_UNKNOWN = -1, PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0, PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1, PP_SMC_POWER_PROFILE_POWERSAVING = 0x2, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5eae14fe79f1..21bd635bcdfc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -764,6 +764,7 @@ static int smu_early_init(struct amdgpu_ip_block *ip_block) smu->smu_baco.platform_support = false; smu->smu_baco.maco_support = false; smu->user_dpm_profile.fan_mode = -1; + smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN; mutex_init(&smu->message_lock); @@ -1248,6 +1249,21 @@ static bool smu_is_workload_profile_available(struct smu_context *smu, return smu->workload_map && smu->workload_map[profile].valid_mapping; } 
+static void smu_init_power_profile(struct smu_context *smu) +{ + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) { + if (smu->is_apu || + !smu_is_workload_profile_available( + smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) + smu->power_profile_mode = + PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + else + smu->power_profile_mode = + PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu_power_profile_mode_get(smu, smu->power_profile_mode); +} + static int smu_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1269,13 +1285,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - if (smu->is_apu || - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - else - smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - smu_power_profile_mode_get(smu, smu->power_profile_mode); - + smu_init_power_profile(smu); smu->display_config = &adev->pm.pm_display_cfg; smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index f4ac403b8b36..aabb94796005 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2810,4 +2810,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) smu->workload_map = smu_v13_0_7_workload_map; smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); + smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; } diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 09500cddc009..ef2d490965ba 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -929,7 +929,6 @@ impl QrImage<'_> { /// * `tmp` must be valid for reading and writing for `tmp_size` bytes. /// /// They must remain valid for the duration of the function call. - #[no_mangle] pub unsafe extern "C" fn drm_panic_qr_generate( url: *const i8, diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 174753625bca..7cd902bbd244 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1343,6 +1343,17 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state, intel_de_write_fw(display, reg, val); } +static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state, + i915_reg_t reg, u32 val) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (crtc_state->dsb_color_vblank) + intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val); + else + intel_de_write_fw(display, reg, val); +} + static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { @@ -1357,19 +1368,29 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state, lut = blob->data; /* - * DSB fails to correctly load the legacy LUT - * unless we either write each entry twice, - * or use non-posted writes + * DSB fails to correctly load the legacy LUT unless + * we either write each entry twice when using posted + * writes, or we use non-posted writes. 
+ * + * If palette anti-collision is active during LUT + * register writes: + * - posted writes simply get dropped and thus the LUT + * contents may not be correctly updated + * - non-posted writes are blocked and thus the LUT + * contents are always correct, but simultaneous CPU + * MMIO access will start to fail + * + * Choose the lesser of two evils and use posted writes. + * Using posted writes is also faster, even when having + * to write each register twice. */ - if (crtc_state->dsb_color_vblank) - intel_dsb_nonpost_start(crtc_state->dsb_color_vblank); - - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i), i9xx_lut_8(&lut[i])); - - if (crtc_state->dsb_color_vblank) - intel_dsb_nonpost_end(crtc_state->dsb_color_vblank); + if (crtc_state->dsb_color_vblank) + ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); + } } static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state, @@ -1458,8 +1479,8 @@ static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state, prec_index); for (i = 0; i < lut_size; i++) - ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), - ilk_lut_10(&lut[i])); + ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe), + ilk_lut_10(&lut[i])); /* * Reset the index, otherwise it prevents the legacy palette to be @@ -1612,16 +1633,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. */ - ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - DISPLAY_VER(display) >= 14 ? - mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i])); + ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe), + DISPLAY_VER(display) >= 14 ? + mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i])); } /* Clamp values > 1.0. */ while (i++ < glk_degamma_lut_size(display)) - ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - DISPLAY_VER(display) >= 14 ? - 1 << 24 : 1 << 16); + ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe), + DISPLAY_VER(display) >= 14 ? 
+ 1 << 24 : 1 << 16); ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0); } @@ -1687,10 +1708,10 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) for (i = 0; i < 9; i++) { const struct drm_color_lut *entry = &lut[i]; - ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), - ilk_lut_12p4_ldw(entry)); - ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), - ilk_lut_12p4_udw(entry)); + ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), + ilk_lut_12p4_udw(entry)); } ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe), @@ -1726,10 +1747,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) for (i = 1; i < 257; i++) { entry = &lut[i * 8]; - ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), - ilk_lut_12p4_ldw(entry)); - ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), - ilk_lut_12p4_udw(entry)); + ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe), + ilk_lut_12p4_udw(entry)); } /* @@ -1747,10 +1768,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) for (i = 0; i < 256; i++) { entry = &lut[i * 8 * 128]; - ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), - ilk_lut_12p4_ldw(entry)); - ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), - ilk_lut_12p4_udw(entry)); + ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe), + ilk_lut_12p4_udw(entry)); } ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index b7b44399adaa..4d3785f5cb52 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -273,16 +273,20 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_ } /** - * intel_dsb_reg_write() - Emit register wriite to the DSB context + * intel_dsb_reg_write_indexed() - Emit register wriite to the DSB context * @dsb: DSB context * @reg: register address. * @val: value. * * This function is used for writing register-value pair in command * buffer of DSB. + * + * Note that indexed writes are slower than normal MMIO writes + * for a small number (less than 5 or so) of writes to the same + * register. 
*/ -void intel_dsb_reg_write(struct intel_dsb *dsb, - i915_reg_t reg, u32 val) +void intel_dsb_reg_write_indexed(struct intel_dsb *dsb, + i915_reg_t reg, u32 val) { /* * For example the buffer will look like below for 3 dwords for auto @@ -340,6 +344,15 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, } } +void intel_dsb_reg_write(struct intel_dsb *dsb, + i915_reg_t reg, u32 val) +{ + intel_dsb_emit(dsb, val, + (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) | + (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | + i915_mmio_reg_offset(reg)); +} + static u32 intel_dsb_mask_to_byte_en(u32 mask) { return (!!(mask & 0xff000000) << 3 | diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 33e0fc2ab380..da6df07a3c83 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -34,6 +34,8 @@ void intel_dsb_finish(struct intel_dsb *dsb); void intel_dsb_cleanup(struct intel_dsb *dsb); void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); +void intel_dsb_reg_write_indexed(struct intel_dsb *dsb, + i915_reg_t reg, u32 val); void intel_dsb_reg_write_masked(struct intel_dsb *dsb, i915_reg_t reg, u32 mask, u32 val); void intel_dsb_noop(struct intel_dsb *dsb, int count); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 4eb58887819a..71c0daef1996 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1643,9 +1643,21 @@ capture_engine(struct intel_engine_cs *engine, return NULL; intel_engine_get_hung_entity(engine, &ce, &rq); - if (rq && !i915_request_started(rq)) - drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n", - engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id); + if (rq && !i915_request_started(rq)) { + /* + * We want to know also what is the guc_id of the context, + * but if we don't have the context reference, then skip + * printing it. 
+ */ + if (ce) + drm_info(&engine->gt->i915->drm, + "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n", + engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id); + else + drm_info(&engine->gt->i915->drm, + "Got hung context on %s with active request %lld:%lld not yet started\n", + engine->name, rq->fence.context, rq->fence.seqno); + } if (rq) { capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 762127dd56c5..70a854557e6e 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void) return 0; err_priorities: - kmem_cache_destroy(slab_priorities); + kmem_cache_destroy(slab_dependencies); return -ENOMEM; } diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 1a192a2a941b..3bbdb362d6f0 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -224,8 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_PINNED); if (IS_ERR(tiny)) { - KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", - PTR_ERR(pt)); + KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n", + PTR_ERR(tiny)); goto free_pt; } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 3cb228c773cd..6146d1776bda 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -65,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe __invalidation_fence_signal(xe, fence); } +void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence) +{ + if (WARN_ON_ONCE(!fence->gt)) + return; + + __invalidation_fence_signal(gt_to_xe(fence->gt), fence); +} + static void xe_gt_tlb_fence_timeout(struct work_struct *work) { struct xe_gt *gt = container_of(work, struct xe_gt, diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index f430d5797af7..00b1c6c01e8d 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len); void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, bool stack); +void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence); static inline void xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence) diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index f27f579f4d85..797576690356 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -1333,8 +1333,7 @@ static void invalidation_fence_cb(struct dma_fence *fence, queue_work(system_wq, &ifence->work); } else { ifence->base.base.error = ifence->fence->error; - dma_fence_signal(&ifence->base.base); - dma_fence_put(&ifence->base.base); + xe_gt_tlb_invalidation_fence_signal(&ifence->base); } dma_fence_put(ifence->fence); } diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index e1a0e27cda14..c13123008e90 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -27,46 +27,27 @@ #include "xe_reg_whitelist.h" #include "xe_rtp_types.h" -#define 
XE_REG_SR_GROW_STEP_DEFAULT 16 - static void reg_sr_fini(struct drm_device *drm, void *arg) { struct xe_reg_sr *sr = arg; + struct xe_reg_sr_entry *entry; + unsigned long reg; + + xa_for_each(&sr->xa, reg, entry) + kfree(entry); xa_destroy(&sr->xa); - kfree(sr->pool.arr); - memset(&sr->pool, 0, sizeof(sr->pool)); } int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe) { xa_init(&sr->xa); - memset(&sr->pool, 0, sizeof(sr->pool)); - sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT; sr->name = name; return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr); } EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init); -static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr) -{ - if (sr->pool.used == sr->pool.allocated) { - struct xe_reg_sr_entry *arr; - - arr = krealloc_array(sr->pool.arr, - ALIGN(sr->pool.allocated + 1, sr->pool.grow_step), - sizeof(*arr), GFP_KERNEL); - if (!arr) - return NULL; - - sr->pool.arr = arr; - sr->pool.allocated += sr->pool.grow_step; - } - - return &sr->pool.arr[sr->pool.used++]; -} - static bool compatible_entries(const struct xe_reg_sr_entry *e1, const struct xe_reg_sr_entry *e2) { @@ -112,7 +93,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr, return 0; } - pentry = alloc_entry(sr); + pentry = kmalloc(sizeof(*pentry), GFP_KERNEL); if (!pentry) { ret = -ENOMEM; goto fail; diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h index ad48a52b824a..ebe11f237fa2 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr_types.h +++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h @@ -20,12 +20,6 @@ struct xe_reg_sr_entry { }; struct xe_reg_sr { - struct { - struct xe_reg_sr_entry *arr; - unsigned int used; - unsigned int allocated; - unsigned int grow_step; - } pool; struct xarray xa; const char *name; diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index efb33802804f..d2877e4cc28d 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1075,6 +1075,7 @@ static const struct of_device_id nmk_i2c_eyeq_match_table[] = { .compatible = "mobileye,eyeq6h-i2c", .data = (void *)NMK_I2C_EYEQ_FLAG_32B_BUS, }, + { /* sentinel */ } }; static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index d4d139b97513..9a1af5bbd604 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -95,7 +95,7 @@ enum { static inline int wait_timeout(struct i2c_pnx_algo_data *data) { - long timeout = data->timeout; + long timeout = jiffies_to_msecs(data->timeout); while (timeout > 0 && (ioread32(I2C_REG_STS(data)) & mstatus_active)) { mdelay(1); @@ -106,7 +106,7 @@ static inline int wait_timeout(struct i2c_pnx_algo_data *data) static inline int wait_reset(struct i2c_pnx_algo_data *data) { - long timeout = data->timeout; + long timeout = jiffies_to_msecs(data->timeout); while (timeout > 0 && (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) { mdelay(1); diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index c218f73c3650..9264adc97ca9 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -352,7 +352,7 @@ static int riic_init_hw(struct riic_dev *riic) if (brl <= (0x1F + 3)) break; - total_ticks /= 2; + total_ticks = DIV_ROUND_UP(total_ticks, 2); rate /= 2; } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 3f691e1fd22c..16f40b8000d7 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1415,6 +1415,7 @@ static 
int domain_flush_pages_v2(struct protection_domain *pdom, struct iommu_cmd cmd; int ret = 0; + lockdep_assert_held(&pdom->lock); list_for_each_entry(dev_data, &pdom->dev_list, list) { struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); u16 domid = dev_data->gcr3_info.domid; @@ -1464,6 +1465,8 @@ static void __domain_flush_pages(struct protection_domain *domain, ioasid_t pasid = IOMMU_NO_PASID; bool gn = false; + lockdep_assert_held(&domain->lock); + if (pdom_is_v2_pgtbl_mode(domain)) { gn = true; ret = domain_flush_pages_v2(domain, address, size); @@ -1585,6 +1588,8 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain) { struct iommu_dev_data *dev_data; + lockdep_assert_held(&domain->lock); + list_for_each_entry(dev_data, &domain->dev_list, list) { struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); @@ -2073,6 +2078,7 @@ static int attach_device(struct device *dev, struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); struct pci_dev *pdev; + unsigned long flags; int ret = 0; mutex_lock(&dev_data->mutex); @@ -2113,7 +2119,9 @@ static int attach_device(struct device *dev, /* Update data structures */ dev_data->domain = domain; + spin_lock_irqsave(&domain->lock, flags); list_add(&dev_data->list, &domain->dev_list); + spin_unlock_irqrestore(&domain->lock, flags); /* Update device table */ dev_update_dte(dev_data, true); @@ -2160,6 +2168,7 @@ static void detach_device(struct device *dev) /* Flush IOTLB and wait for the flushes to finish */ spin_lock_irqsave(&domain->lock, flags); amd_iommu_domain_flush_all(domain); + list_del(&dev_data->list); spin_unlock_irqrestore(&domain->lock, flags); /* Clear GCR3 table */ @@ -2168,7 +2177,6 @@ static void detach_device(struct device *dev) /* Update data structures */ dev_data->domain = NULL; - list_del(&dev_data->list); /* decrease reference counters - needs to happen after the flushes */ pdom_detach_iommu(iommu, domain); diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index c8ec74f089f3..6e41ddaa24d6 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -339,7 +339,7 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu, * one CPU at a time can enter the process, while the others * will be spinning at the same lock. */ - lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf; + lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf; vcmdq = vintf->lvcmdqs[lidx]; if (!vcmdq || !READ_ONCE(vcmdq->enabled)) return NULL; diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c index e5b89f728ad3..09694cca8752 100644 --- a/drivers/iommu/intel/cache.c +++ b/drivers/iommu/intel/cache.c @@ -105,12 +105,35 @@ static void cache_tag_unassign(struct dmar_domain *domain, u16 did, spin_unlock_irqrestore(&domain->cache_lock, flags); } +/* domain->qi_batch will be freed in iommu_free_domain() path. 
*/ +static int domain_qi_batch_alloc(struct dmar_domain *domain) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&domain->cache_lock, flags); + if (domain->qi_batch) + goto out_unlock; + + domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC); + if (!domain->qi_batch) + ret = -ENOMEM; +out_unlock: + spin_unlock_irqrestore(&domain->cache_lock, flags); + + return ret; +} + static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did, struct device *dev, ioasid_t pasid) { struct device_domain_info *info = dev_iommu_priv_get(dev); int ret; + ret = domain_qi_batch_alloc(domain); + if (ret) + return ret; + ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB); if (ret || !info->ats_enabled) return ret; @@ -139,6 +162,10 @@ static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did, struct device_domain_info *info = dev_iommu_priv_get(dev); int ret; + ret = domain_qi_batch_alloc(domain); + if (ret) + return ret; + ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); if (ret || !info->ats_enabled) return ret; @@ -190,13 +217,6 @@ int cache_tag_assign_domain(struct dmar_domain *domain, u16 did = domain_get_id_for_dev(domain, dev); int ret; - /* domain->qi_bach will be freed in iommu_free_domain() path. */ - if (!domain->qi_batch) { - domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL); - if (!domain->qi_batch) - return -ENOMEM; - } - ret = __cache_tag_assign_domain(domain, did, dev, pasid); if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED) return ret; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 7d0acb74d5a5..79e0da9eb626 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3220,6 +3220,9 @@ void device_block_translation(struct device *dev) struct intel_iommu *iommu = info->iommu; unsigned long flags; + if (info->domain) + cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); + iommu_disable_pci_caps(info); if (!dev_is_real_dma_subdevice(dev)) { if (sm_supported(iommu)) @@ -3236,7 +3239,6 @@ void device_block_translation(struct device *dev) list_del(&info->link); spin_unlock_irqrestore(&info->domain->lock, flags); - cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); domain_detach_iommu(info->domain, iommu); info->domain = NULL; } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 0f2a926d3bd5..5b7d85f1e143 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -265,7 +265,8 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); devtlb_invalidation_with_pasid(iommu, dev, pasid); - intel_iommu_drain_pasid_prq(dev, pasid); + if (!fault_ignore) + intel_iommu_drain_pasid_prq(dev, pasid); } /* diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 34db379d066a..79d8cc80693c 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -161,7 +161,22 @@ static bool cpus_have_group0 __ro_after_init; static void __init gic_prio_init(void) { - cpus_have_security_disabled = gic_dist_security_disabled(); + bool ds; + + ds = gic_dist_security_disabled(); + if (!ds) { + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_CTLR); + val |= GICD_CTLR_DS; + writel_relaxed(val, gic_data.dist_base + GICD_CTLR); + + ds = gic_dist_security_disabled(); + if (ds) + pr_warn("Broken GIC integration, security disabled"); + } + + cpus_have_security_disabled 
= ds; cpus_have_group0 = gic_has_group0(); /* diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 8fae6dc01024..6503573557fd 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -64,7 +64,7 @@ static void gic_check_cpu_features(void) union gic_base { void __iomem *common_base; - void __percpu * __iomem *percpu_base; + void __iomem * __percpu *percpu_base; }; struct gic_chip_data { diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index d58db9a27e6c..76e2c6868548 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -76,9 +76,9 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, * pointer and the requested position. */ nr_blocks = block - wp_block; - ret = blkdev_issue_zeroout(dev->bdev, - dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), - dmz_blk2sect(nr_blocks), GFP_NOIO, 0); + ret = blk_zone_issue_zeroout(dev->bdev, + dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), + dmz_blk2sect(nr_blocks), GFP_NOIO); if (ret) { dmz_dev_err(dev, "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 49dd4fe195e5..7b78c2bada81 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1520,9 +1520,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev, struct slave *slave; mask = features; - - features &= ~NETIF_F_ONE_FOR_ALL; - features |= NETIF_F_ALL_FOR_ALL; + features = netdev_base_features(features); bond_for_each_slave(bond, slave, iter) { features = netdev_increment_features(features, @@ -1536,6 +1534,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ + NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HIGHDMA | NETIF_F_LRO) #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ @@ -1565,8 +1564,9 @@ static void bond_compute_features(struct bonding *bond) if (!bond_has_slaves(bond)) goto done; - vlan_features &= NETIF_F_ALL_FOR_ALL; - mpls_features &= NETIF_F_ALL_FOR_ALL; + + vlan_features = netdev_base_features(vlan_features); + mpls_features = netdev_base_features(mpls_features); bond_for_each_slave(bond, slave, iter) { vlan_features = netdev_increment_features(vlan_features, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 920443ee8ffd..8a03baa6aecc 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1100,10 +1100,9 @@ static const struct regmap_range ksz9896_valid_regs[] = { regmap_reg_range(0x1030, 0x1030), regmap_reg_range(0x1100, 0x1115), regmap_reg_range(0x111a, 0x111f), - regmap_reg_range(0x1122, 0x1127), - regmap_reg_range(0x112a, 0x112b), - regmap_reg_range(0x1136, 0x1139), - regmap_reg_range(0x113e, 0x113f), + regmap_reg_range(0x1120, 0x112b), + regmap_reg_range(0x1134, 0x113b), + regmap_reg_range(0x113c, 0x113f), regmap_reg_range(0x1400, 0x1401), regmap_reg_range(0x1403, 0x1403), regmap_reg_range(0x1410, 0x1417), @@ -1130,10 +1129,9 @@ static const struct regmap_range ksz9896_valid_regs[] = { regmap_reg_range(0x2030, 0x2030), regmap_reg_range(0x2100, 0x2115), regmap_reg_range(0x211a, 0x211f), - regmap_reg_range(0x2122, 0x2127), - regmap_reg_range(0x212a, 0x212b), - regmap_reg_range(0x2136, 0x2139), - regmap_reg_range(0x213e, 0x213f), + regmap_reg_range(0x2120, 0x212b), + regmap_reg_range(0x2134, 0x213b), + 
regmap_reg_range(0x213c, 0x213f), regmap_reg_range(0x2400, 0x2401), regmap_reg_range(0x2403, 0x2403), regmap_reg_range(0x2410, 0x2417), @@ -1160,10 +1158,9 @@ static const struct regmap_range ksz9896_valid_regs[] = { regmap_reg_range(0x3030, 0x3030), regmap_reg_range(0x3100, 0x3115), regmap_reg_range(0x311a, 0x311f), - regmap_reg_range(0x3122, 0x3127), - regmap_reg_range(0x312a, 0x312b), - regmap_reg_range(0x3136, 0x3139), - regmap_reg_range(0x313e, 0x313f), + regmap_reg_range(0x3120, 0x312b), + regmap_reg_range(0x3134, 0x313b), + regmap_reg_range(0x313c, 0x313f), regmap_reg_range(0x3400, 0x3401), regmap_reg_range(0x3403, 0x3403), regmap_reg_range(0x3410, 0x3417), @@ -1190,10 +1187,9 @@ static const struct regmap_range ksz9896_valid_regs[] = { regmap_reg_range(0x4030, 0x4030), regmap_reg_range(0x4100, 0x4115), regmap_reg_range(0x411a, 0x411f), - regmap_reg_range(0x4122, 0x4127), - regmap_reg_range(0x412a, 0x412b), - regmap_reg_range(0x4136, 0x4139), - regmap_reg_range(0x413e, 0x413f), + regmap_reg_range(0x4120, 0x412b), + regmap_reg_range(0x4134, 0x413b), + regmap_reg_range(0x413c, 0x413f), regmap_reg_range(0x4400, 0x4401), regmap_reg_range(0x4403, 0x4403), regmap_reg_range(0x4410, 0x4417), @@ -1220,10 +1216,9 @@ static const struct regmap_range ksz9896_valid_regs[] = { regmap_reg_range(0x5030, 0x5030), regmap_reg_range(0x5100, 0x5115), regmap_reg_range(0x511a, 0x511f), - regmap_reg_range(0x5122, 0x5127), - regmap_reg_range(0x512a, 0x512b), - regmap_reg_range(0x5136, 0x5139), - regmap_reg_range(0x513e, 0x513f), + regmap_reg_range(0x5120, 0x512b), + regmap_reg_range(0x5134, 0x513b), + regmap_reg_range(0x513c, 0x513f), regmap_reg_range(0x5400, 0x5401), regmap_reg_range(0x5403, 0x5403), regmap_reg_range(0x5410, 0x5417), @@ -1250,10 +1245,9 @@ static const struct regmap_range ksz9896_valid_regs[] = { regmap_reg_range(0x6030, 0x6030), regmap_reg_range(0x6100, 0x6115), regmap_reg_range(0x611a, 0x611f), - regmap_reg_range(0x6122, 0x6127), - regmap_reg_range(0x612a, 0x612b), - regmap_reg_range(0x6136, 0x6139), - regmap_reg_range(0x613e, 0x613f), + regmap_reg_range(0x6120, 0x612b), + regmap_reg_range(0x6134, 0x613b), + regmap_reg_range(0x613c, 0x613f), regmap_reg_range(0x6300, 0x6301), regmap_reg_range(0x6400, 0x6401), regmap_reg_range(0x6403, 0x6403), diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 0102a82e88cc..940f1b71226d 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -24,7 +24,7 @@ #define VSC9959_NUM_PORTS 6 #define VSC9959_TAS_GCL_ENTRY_MAX 63 -#define VSC9959_TAS_MIN_GATE_LEN_NS 33 +#define VSC9959_TAS_MIN_GATE_LEN_NS 35 #define VSC9959_VCAP_POLICER_BASE 63 #define VSC9959_VCAP_POLICER_MAX 383 #define VSC9959_SWITCH_PCI_BAR 4 @@ -1056,11 +1056,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) mdiobus_free(felix->imdio); } -/* The switch considers any frame (regardless of size) as eligible for - * transmission if the traffic class gate is open for at least 33 ns. +/* The switch considers any frame (regardless of size) as eligible + * for transmission if the traffic class gate is open for at least + * VSC9959_TAS_MIN_GATE_LEN_NS. + * * Overruns are prevented by cropping an interval at the end of the gate time - * slot for which egress scheduling is blocked, but we need to still keep 33 ns - * available for one packet to be transmitted, otherwise the port tc will hang. 
+ * slot for which egress scheduling is blocked, but we need to still keep + * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted, + * otherwise the port tc will hang. + * * This function returns the size of a gate interval that remains available for * setting the guard band, after reserving the space for one egress frame. */ @@ -1303,7 +1307,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) * per-tc static guard band lengths, so it reduces the * useful gate interval length. Therefore, be careful * to calculate a guard band (and therefore max_sdu) - * that still leaves 33 ns available in the time slot. + * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS + * available in the time slot. */ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte); /* A TC gate may be completely closed, which is a diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6b963086c1d3..b86f980fa7ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1531,7 +1531,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, if (TPA_START_IS_IPV6(tpa_start1)) tpa_info->gso_type = SKB_GSO_TCPV6; /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ - else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && + else if (!BNXT_CHIP_P4_PLUS(bp) && TPA_START_HASH_TYPE(tpa_start) == 3) tpa_info->gso_type = SKB_GSO_TCPV6; tpa_info->rss_hash = @@ -2226,15 +2226,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { type = bnxt_rss_ext_op(bp, rxcmp); } else { - u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); + u32 itypes = RX_CMP_ITYPES(rxcmp); - /* RSS profiles 1 and 3 with extract code 0 for inner - * 4-tuple - */ - if (hash_type != 1 && hash_type != 3) - type = PKT_HASH_TYPE_L3; - else + if (itypes == RX_CMP_FLAGS_ITYPE_TCP || + itypes == RX_CMP_FLAGS_ITYPE_UDP) type = PKT_HASH_TYPE_L4; + else + type = PKT_HASH_TYPE_L3; } skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); } @@ -8367,7 +8365,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; int n = 1; - if (!ctxm->max_entries) + if (!ctxm->max_entries || ctxm->pg_info) continue; if (ctxm->instance_bmap) @@ -8971,8 +8969,8 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) continue; } bnxt_bs_trace_init(bp, ctxm); - last_type = type; } + last_type = type; } if (last_type == BNXT_CTX_INV) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 23f1aff214b4..7df7a2233307 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -267,6 +267,9 @@ struct rx_cmp { (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) +#define RX_CMP_ITYPES(rxcmp) \ + (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK) + #define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\ RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT) @@ -378,7 +381,7 @@ struct rx_agg_cmp { u32 rx_agg_cmp_opaque; __le32 rx_agg_cmp_v; #define RX_AGG_CMP_V (1 << 0) - #define RX_AGG_CMP_AGG_ID (0xffff << 16) + #define RX_AGG_CMP_AGG_ID (0x0fff << 16) #define RX_AGG_CMP_AGG_ID_SHIFT 16 __le32 rx_agg_cmp_unused; }; @@ -416,7 +419,7 @@ struct rx_tpa_start_cmp { #define 
RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7 #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 - #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16) #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 #define RX_TPA_START_CMP_METADATA1 (0xf << 28) #define RX_TPA_START_CMP_METADATA1_SHIFT 28 @@ -540,7 +543,7 @@ struct rx_tpa_end_cmp { #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 - #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16) #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_end_cmp_tsdelta; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 75bd69ff61a8..c7c2c15a1815 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -2076,7 +2076,7 @@ void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks); int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, - unsigned int naddr, u8 *addr); + u8 start, unsigned int naddr, u8 *addr); void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 97a261d5357e..bc3af0054406 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3234,7 +3234,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac) dev_info(pi->adapter->pdev_dev, "Setting MAC %pM on VF %d\n", mac, vf); - ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); + ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac); if (!ret) ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 76de55306c4d..175bf9b13058 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -10215,11 +10215,12 @@ out: * t4_set_vf_mac_acl - Set MAC address for the specified VF * @adapter: The adapter * @vf: one of the VFs instantiated by the specified PF + * @start: The start port id associated with specified VF * @naddr: the number of MAC addresses * @addr: the MAC address(es) to be set to the specified VF */ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, - unsigned int naddr, u8 *addr) + u8 start, unsigned int naddr, u8 *addr) { struct fw_acl_mac_cmd cmd; @@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); cmd.nmac = naddr; - switch (adapter->pf) { + switch (start) { case 3: memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3)); break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index da69e454662a..1b765045aa63 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1452,23 +1452,21 @@ process_flow: * hence modify pcifunc accordingly. 
*/ - /* AF installing for a PF/VF */ - if (!req->hdr.pcifunc) + if (!req->hdr.pcifunc) { + /* AF installing for a PF/VF */ target = req->vf; - - /* PF installing for its VF */ - if (!from_vf && req->vf && !from_rep_dev) { + } else if (!from_vf && req->vf && !from_rep_dev) { + /* PF installing for its VF */ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); - } - - /* Representor device installing for a representee */ - if (from_rep_dev && req->vf) + } else if (from_rep_dev && req->vf) { + /* Representor device installing for a representee */ target = req->vf; - else + } else { /* msg received from PF/VF */ target = req->hdr.pcifunc; + } /* ignore chan_mask in case pf func is not AF, revisit later */ if (!is_pffunc_af(req->hdr.pcifunc)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c index 3d74109f8230..49f22cad92bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c @@ -297,7 +297,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) if (ret) { mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); kvfree(vport_caps); - return ERR_PTR(ret); + if (ret == -EBUSY) + return ERR_PTR(-EBUSY); + return NULL; } return vport_caps; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 73832fb2bc32..ee046468652c 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -59,7 +59,6 @@ config LAN743X source "drivers/net/ethernet/microchip/lan865x/Kconfig" source "drivers/net/ethernet/microchip/lan966x/Kconfig" -source "drivers/net/ethernet/microchip/lan969x/Kconfig" source "drivers/net/ethernet/microchip/sparx5/Kconfig" source "drivers/net/ethernet/microchip/vcap/Kconfig" source "drivers/net/ethernet/microchip/fdma/Kconfig" diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index 7770df82200f..3c65baed9fd8 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -11,7 +11,6 @@ lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o obj-$(CONFIG_LAN865X) += lan865x/ obj-$(CONFIG_LAN966X_SWITCH) += lan966x/ -obj-$(CONFIG_LAN969X_SWITCH) += lan969x/ obj-$(CONFIG_SPARX5_SWITCH) += sparx5/ obj-$(CONFIG_VCAP) += vcap/ obj-$(CONFIG_FDMA) += fdma/ diff --git a/drivers/net/ethernet/microchip/lan969x/Kconfig b/drivers/net/ethernet/microchip/lan969x/Kconfig deleted file mode 100644 index c5c6122ae2ec..000000000000 --- a/drivers/net/ethernet/microchip/lan969x/Kconfig +++ /dev/null @@ -1,5 +0,0 @@ -config LAN969X_SWITCH - bool "Lan969x switch driver" - depends on SPARX5_SWITCH - help - This driver supports the lan969x family of network switch devices. diff --git a/drivers/net/ethernet/microchip/lan969x/Makefile b/drivers/net/ethernet/microchip/lan969x/Makefile deleted file mode 100644 index 316405cbbc71..000000000000 --- a/drivers/net/ethernet/microchip/lan969x/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the Microchip lan969x network device drivers. 
-# - -obj-$(CONFIG_SPARX5_SWITCH) += lan969x-switch.o - -lan969x-switch-y := lan969x_regs.o lan969x.o lan969x_calendar.o \ - lan969x_vcap_ag_api.o lan969x_vcap_impl.o - -# Provide include files -ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma -ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index 3f04992eace6..35b057c9d0cb 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -24,3 +24,9 @@ config SPARX5_DCB DSCP and PCP. If unsure, set to Y. + +config LAN969X_SWITCH + bool "Lan969x switch driver" + depends on SPARX5_SWITCH + help + This driver supports the lan969x family of network switch devices. diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index 3435ca86dd70..4bf2a885a9da 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -16,6 +16,12 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \ sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o +sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \ + lan969x/lan969x.o \ + lan969x/lan969x_calendar.o \ + lan969x/lan969x_vcap_ag_api.o \ + lan969x/lan969x_vcap_impl.o + # Provide include files ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c index ac37d0f74ee3..c2afa2176b08 100644 --- a/drivers/net/ethernet/microchip/lan969x/lan969x.c +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c @@ -273,9 +273,9 @@ static irqreturn_t lan969x_ptp_irq_handler(int irq, void *args) if (WARN_ON(!skb_match)) continue; - spin_lock(&sparx5->ptp_ts_id_lock); + spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); sparx5->ptp_skbs--; - spin_unlock(&sparx5->ptp_ts_id_lock); + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); /* Get the h/w timestamp */ sparx5_get_hwtimestamp(sparx5, &ts, delay); @@ -346,8 +346,3 @@ const struct sparx5_match_data lan969x_desc = { .consts = &lan969x_consts, .ops = &lan969x_ops, }; -EXPORT_SYMBOL_GPL(lan969x_desc); - -MODULE_DESCRIPTION("Microchip lan969x switch driver"); -MODULE_AUTHOR("Daniel Machon <daniel.machon@microchip.com>"); -MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h index 2489d0d32dfd..2489d0d32dfd 100644 --- a/drivers/net/ethernet/microchip/lan969x/lan969x.h +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c index e857640df185..e857640df185 100644 --- a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c index ace4ba21eec4..ace4ba21eec4 100644 --- a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c 
b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c index 7acc5bcf337a..7acc5bcf337a 100644 --- a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c index 543a1f2bf6bd..543a1f2bf6bd 100644 --- a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c index 5fe941c66c17..5c46d81de530 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c @@ -98,7 +98,6 @@ u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed) default: return 0; } } -EXPORT_SYMBOL_GPL(sparx5_cal_speed_to_value); static u32 sparx5_bandwidth_to_calendar(u32 bw) { @@ -150,7 +149,6 @@ enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno) return SPX5_CAL_SPEED_NONE; return sparx5_bandwidth_to_calendar(port->conf.bandwidth); } -EXPORT_SYMBOL_GPL(sparx5_get_port_cal_speed); /* Auto configure the QSYS calendar based on port configuration */ int sparx5_config_auto_calendar(struct sparx5 *sparx5) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 2f1013f870fb..f61aa15beab7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -24,7 +24,7 @@ #include <linux/types.h> #include <linux/reset.h> -#include "../lan969x/lan969x.h" /* for lan969x match data */ +#include "lan969x/lan969x.h" /* for lan969x match data */ #include "sparx5_main_regs.h" #include "sparx5_main.h" @@ -780,12 +780,11 @@ static int sparx5_start(struct sparx5 *sparx5) err = -ENXIO; if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) { if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) - err = devm_request_threaded_irq(sparx5->dev, - sparx5->fdma_irq, - NULL, - sparx5_fdma_handler, - IRQF_ONESHOT, - "sparx5-fdma", sparx5); + err = devm_request_irq(sparx5->dev, + sparx5->fdma_irq, + sparx5_fdma_handler, + 0, + "sparx5-fdma", sparx5); if (!err) err = sparx5_fdma_start(sparx5); if (err) @@ -1093,7 +1092,7 @@ static const struct sparx5_match_data sparx5_desc = { static const struct of_device_id mchp_sparx5_match[] = { { .compatible = "microchip,sparx5-switch", .data = &sparx5_desc }, -#if IS_ENABLED(CONFIG_LAN969X_SWITCH) +#ifdef CONFIG_LAN969X_SWITCH { .compatible = "microchip,lan9691-switch", .data = &lan969x_desc }, #endif { } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c index 9806729e9c62..76097761fa97 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c @@ -12,7 +12,6 @@ #define SPX5_MIRROR_DISABLED 0 #define SPX5_MIRROR_EGRESS 1 #define SPX5_MIRROR_INGRESS 2 -#define SPX5_MIRROR_MONITOR_PORT_DEFAULT 65 #define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */ /* Convert from bool ingress/egress to mirror direction */ @@ -200,7 +199,7 @@ void sparx5_mirror_del(struct sparx5_mall_entry *entry) sparx5_mirror_monitor_set(sparx5, mirror_idx, - SPX5_MIRROR_MONITOR_PORT_DEFAULT); + sparx5->data->consts->n_ports); } void 
sparx5_mirror_stats(struct sparx5_mall_entry *entry, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index 1401761c6251..f9d1a6bb9bff 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -1151,7 +1151,7 @@ int sparx5_port_init(struct sparx5 *sparx5, spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), DEV10G_MAC_MAXLEN_CFG_MAX_LEN, devinst, - DEV10G_MAC_ENA_CFG(0)); + DEV10G_MAC_MAXLEN_CFG(0)); /* Handle Signal Detect in 10G PCS */ spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) | diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c index 1c2903700a9c..2f168700f63c 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -303,7 +303,6 @@ void sparx5_get_hwtimestamp(struct sparx5 *sparx5, spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); } -EXPORT_SYMBOL_GPL(sparx5_get_hwtimestamp); irqreturn_t sparx5_ptp_irq_handler(int irq, void *args) { diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 550be6dc305d..2dc0c6ad54be 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1318,7 +1318,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) GFP_KERNEL); if (!gc->irq_contexts) { err = -ENOMEM; - goto free_irq_vector; + goto free_irq_array; } for (i = 0; i < nvec; i++) { @@ -1375,6 +1375,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) gc->max_num_msix = nvec; gc->num_msix_usable = nvec; cpus_read_unlock(); + kfree(irqs); return 0; free_irq: @@ -1387,8 +1388,9 @@ free_irq: } kfree(gc->irq_contexts); - kfree(irqs); gc->irq_contexts = NULL; +free_irq_array: + kfree(irqs); free_irq_vector: cpus_read_unlock(); pci_free_irq_vectors(pdev); diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index e172638b0601..808ce8e68d39 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -14,6 +14,8 @@ #include <soc/mscc/ocelot.h> #include "ocelot.h" +#define OCELOT_PTP_TX_TSTAMP_TIMEOUT (5 * HZ) + int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); @@ -495,6 +497,28 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto) return HWTSTAMP_FILTER_NONE; } +static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd) +{ + switch (tx_type) { + case HWTSTAMP_TX_ON: + *ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + /* IFH_REW_OP_ONE_STEP_PTP updates the correctionField, + * what we need to update is the originTimestamp. 
+ */ + *ptp_cmd = IFH_REW_OP_ORIGIN_PTP; + break; + case HWTSTAMP_TX_OFF: + *ptp_cmd = 0; + break; + default: + return -ERANGE; + } + + return 0; +} + int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -521,30 +545,19 @@ EXPORT_SYMBOL(ocelot_hwstamp_get); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd; bool l2 = false, l4 = false; struct hwtstamp_config cfg; + bool old_l2, old_l4; int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; /* Tx type sanity check */ - switch (cfg.tx_type) { - case HWTSTAMP_TX_ON: - ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; - break; - case HWTSTAMP_TX_ONESTEP_SYNC: - /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we - * need to update the origin time. - */ - ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP; - break; - case HWTSTAMP_TX_OFF: - ocelot_port->ptp_cmd = 0; - break; - default: - return -ERANGE; - } + err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd); + if (err) + return err; switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -569,13 +582,27 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) return -ERANGE; } + old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2; + old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4; + err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); if (err) return err; + ocelot_port->ptp_cmd = ptp_cmd; + cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) { + err = -EFAULT; + goto out_restore_ptp_traps; + } + + return 0; +out_restore_ptp_traps: + ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4); + ocelot_port->ptp_cmd = old_ptp_cmd; + return err; } EXPORT_SYMBOL(ocelot_hwstamp_set); @@ -603,34 +630,87 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_get_ts_info); -static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, - struct sk_buff *clone) +static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot, + int port, u8 ts_id, + u32 seqid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - unsigned long flags; + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct ptp_header *hdr; - spin_lock_irqsave(&ocelot->ts_id_lock, flags); + spin_lock(&ocelot->ts_id_lock); - if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || - ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { - spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); - return -EBUSY; + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { + if (OCELOT_SKB_CB(skb)->ts_id != ts_id) + continue; + + /* Check that the timestamp ID is for the expected PTP + * sequenceId. We don't have to test ptp_parse_header() against + * NULL, because we've pre-validated the packet's ptp_class. 
+ */ + hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class); + if (seqid != ntohs(hdr->sequence_id)) + continue; + + __skb_unlink(skb, &ocelot_port->tx_skbs); + ocelot->ptp_skbs_in_flight--; + skb_match = skb; + break; } - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ - OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; + spin_unlock(&ocelot->ts_id_lock); - ocelot_port->ts_id++; - if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) - ocelot_port->ts_id = 0; + return skb_match; +} + +static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port, + struct sk_buff *clone) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID); + struct sk_buff *skb, *skb_tmp; + unsigned long n; + + spin_lock(&ocelot->ts_id_lock); + + /* To get a better chance of acquiring a timestamp ID, first flush the + * stale packets still waiting in the TX timestamping queue. They are + * probably lost. + */ + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { + if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time + + OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) { + dev_warn_ratelimited(ocelot->dev, + "port %d invalidating stale timestamp ID %u which seems lost\n", + port, OCELOT_SKB_CB(skb)->ts_id); + __skb_unlink(skb, &ocelot_port->tx_skbs); + kfree_skb(skb); + ocelot->ptp_skbs_in_flight--; + } else { + __set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight); + } + } + + if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { + spin_unlock(&ocelot->ts_id_lock); + return -EBUSY; + } + + n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID); + if (n == OCELOT_MAX_PTP_ID) { + spin_unlock(&ocelot->ts_id_lock); + return -EBUSY; + } - ocelot_port->ptp_skbs_in_flight++; + /* Found an available timestamp ID, use it */ + OCELOT_SKB_CB(clone)->ts_id = n; + OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies; ocelot->ptp_skbs_in_flight++; + __skb_queue_tail(&ocelot_port->tx_skbs, clone); - skb_queue_tail(&ocelot_port->tx_skbs, clone); + spin_unlock(&ocelot->ts_id_lock); - spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); + dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n); return 0; } @@ -687,10 +767,14 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, if (!(*clone)) return -ENOMEM; - err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); - if (err) + /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ + err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone); + if (err) { + kfree_skb(*clone); return err; + } + skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS; OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; } @@ -726,28 +810,15 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot, spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); } -static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) -{ - struct ptp_header *hdr; - - hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); - if (WARN_ON(!hdr)) - return false; - - return seqid == ntohs(hdr->sequence_id); -} - void ocelot_get_txtstamp(struct ocelot *ocelot) { int budget = OCELOT_PTP_QUEUE_SZ; while (budget--) { - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; struct skb_shared_hwtstamps shhwtstamps; u32 val, id, seqid, txport; - struct ocelot_port *port; + struct sk_buff *skb_match; struct timespec64 ts; - unsigned long flags; val = ocelot_read(ocelot, SYS_PTP_STATUS); @@ -762,36 +833,14 @@ void ocelot_get_txtstamp(struct ocelot 
*ocelot) txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); - port = ocelot->ports[txport]; - - spin_lock(&ocelot->ts_id_lock); - port->ptp_skbs_in_flight--; - ocelot->ptp_skbs_in_flight--; - spin_unlock(&ocelot->ts_id_lock); - /* Retrieve its associated skb */ -try_again: - spin_lock_irqsave(&port->tx_skbs.lock, flags); - - skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { - if (OCELOT_SKB_CB(skb)->ts_id != id) - continue; - __skb_unlink(skb, &port->tx_skbs); - skb_match = skb; - break; - } - - spin_unlock_irqrestore(&port->tx_skbs.lock, flags); - - if (WARN_ON(!skb_match)) - continue; - - if (!ocelot_validate_ptp_skb(skb_match, seqid)) { - dev_err_ratelimited(ocelot->dev, - "port %d received stale TX timestamp for seqid %d, discarding\n", - txport, seqid); - dev_kfree_skb_any(skb); - goto try_again; + skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id, + seqid); + if (!skb_match) { + dev_warn_ratelimited(ocelot->dev, + "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n", + txport, seqid, id); + goto next_ts; } /* Get the h/w timestamp */ @@ -802,7 +851,7 @@ try_again: shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); skb_complete_tx_timestamp(skb_match, &shhwtstamps); - /* Next ts */ +next_ts: ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); } } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index ef9c02b000e4..38a779f4b866 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -54,7 +54,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000." #define QCASPI_PLUGGABLE_MIN 0 #define QCASPI_PLUGGABLE_MAX 1 -static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; +static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX; module_param(qcaspi_pluggable, int, 0); MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); @@ -818,7 +818,6 @@ qcaspi_netdev_init(struct net_device *dev) dev->mtu = QCAFRM_MAX_MTU; dev->type = ARPHRD_ETHER; - qca->clkspeed = qcaspi_clkspeed; qca->burst_len = qcaspi_burst_len; qca->spi_thread = NULL; qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + @@ -909,17 +908,15 @@ qca_spi_probe(struct spi_device *spi) legacy_mode = of_property_read_bool(spi->dev.of_node, "qca,legacy-mode"); - if (qcaspi_clkspeed == 0) { - if (spi->max_speed_hz) - qcaspi_clkspeed = spi->max_speed_hz; - else - qcaspi_clkspeed = QCASPI_CLK_SPEED; - } + if (qcaspi_clkspeed) + spi->max_speed_hz = qcaspi_clkspeed; + else if (!spi->max_speed_hz) + spi->max_speed_hz = QCASPI_CLK_SPEED; - if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || - (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { - dev_err(&spi->dev, "Invalid clkspeed: %d\n", - qcaspi_clkspeed); + if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN || + spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) { + dev_err(&spi->dev, "Invalid clkspeed: %u\n", + spi->max_speed_hz); return -EINVAL; } @@ -944,14 +941,13 @@ qca_spi_probe(struct spi_device *spi) return -EINVAL; } - dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", + dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n", QCASPI_DRV_VERSION, - qcaspi_clkspeed, + spi->max_speed_hz, qcaspi_burst_len, qcaspi_pluggable); spi->mode = SPI_MODE_3; - spi->max_speed_hz = qcaspi_clkspeed; if (spi_setup(spi) < 0) { dev_err(&spi->dev, "Unable to setup SPI device\n"); return -EFAULT; diff --git 
a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index 7ba5c9e2f61c..90b290f94c27 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -89,7 +89,6 @@ struct qcaspi { #endif /* user configurable options */ - u32 clkspeed; u8 legacy_mode; u16 burst_len; }; diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 8d18dae4d8fb..dbbbf024e7ab 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -862,13 +862,10 @@ static void rswitch_tx_free(struct net_device *ndev) struct rswitch_ext_desc *desc; struct sk_buff *skb; - for (; rswitch_get_num_cur_queues(gq) > 0; - gq->dirty = rswitch_next_queue_index(gq, false, 1)) { - desc = &gq->tx_ring[gq->dirty]; - if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) - break; - + desc = &gq->tx_ring[gq->dirty]; + while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) { dma_rmb(); + skb = gq->skbs[gq->dirty]; if (skb) { rdev->ndev->stats.tx_packets++; @@ -879,7 +876,10 @@ static void rswitch_tx_free(struct net_device *ndev) dev_kfree_skb_any(gq->skbs[gq->dirty]); gq->skbs[gq->dirty] = NULL; } + desc->desc.die_dt = DT_EEMPTY; + gq->dirty = rswitch_next_queue_index(gq, false, 1); + desc = &gq->tx_ring[gq->dirty]; } } @@ -908,8 +908,10 @@ retry: if (napi_complete_done(napi, budget - quota)) { spin_lock_irqsave(&priv->lock, flags); - rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); - rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + if (test_bit(rdev->port, priv->opened_ports)) { + rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); + rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + } spin_unlock_irqrestore(&priv->lock, flags); } @@ -1114,25 +1116,40 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha) static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) { - u32 val; + u32 pis, lsc; rswitch_etha_write_mac_address(etha, mac); + switch (etha->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + pis = MPIC_PIS_GMII; + break; + case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_5GBASER: + pis = MPIC_PIS_XGMII; + break; + default: + pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC)); + break; + } + switch (etha->speed) { case 100: - val = MPIC_LSC_100M; + lsc = MPIC_LSC_100M; break; case 1000: - val = MPIC_LSC_1G; + lsc = MPIC_LSC_1G; break; case 2500: - val = MPIC_LSC_2_5G; + lsc = MPIC_LSC_2_5G; break; default: - return; + lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC)); + break; } - iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC); + rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC, + FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc)); } static void rswitch_etha_enable_mii(struct rswitch_etha *etha) @@ -1538,20 +1555,20 @@ static int rswitch_open(struct net_device *ndev) struct rswitch_device *rdev = netdev_priv(ndev); unsigned long flags; - phy_start(ndev->phydev); + if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); napi_enable(&rdev->napi); - netif_start_queue(ndev); spin_lock_irqsave(&rdev->priv->lock, flags); + bitmap_set(rdev->priv->opened_ports, rdev->port, 1); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); spin_unlock_irqrestore(&rdev->priv->lock, flags); - if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) - 
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); + phy_start(ndev->phydev); - bitmap_set(rdev->priv->opened_ports, rdev->port, 1); + netif_start_queue(ndev); return 0; }; @@ -1563,7 +1580,16 @@ static int rswitch_stop(struct net_device *ndev) unsigned long flags; netif_tx_stop_all_queues(ndev); + + phy_stop(ndev->phydev); + + spin_lock_irqsave(&rdev->priv->lock, flags); + rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); + rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); + spin_unlock_irqrestore(&rdev->priv->lock, flags); + + napi_disable(&rdev->napi); if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); @@ -1576,14 +1602,6 @@ static int rswitch_stop(struct net_device *ndev) kfree(ts_info); } - spin_lock_irqsave(&rdev->priv->lock, flags); - rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); - rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); - spin_unlock_irqrestore(&rdev->priv->lock, flags); - - phy_stop(ndev->phydev); - napi_disable(&rdev->napi); - return 0; }; @@ -1681,8 +1699,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) goto err_kfree; - gq->skbs[gq->cur] = skb; - gq->unmap_addrs[gq->cur] = dma_addr_orig; + /* Stored the skb at the last descriptor to avoid skb free before hardware completes send */ + gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; + gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig; + + dma_wmb(); /* DT_FSTART should be set at last. So, this is reverse order. */ for (i = nr_desc; i-- > 0; ) { @@ -1694,14 +1715,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd goto err_unmap; } - wmb(); /* gq->cur must be incremented after die_dt was set */ - gq->cur = rswitch_next_queue_index(gq, true, nr_desc); rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); return ret; err_unmap: + gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); err_kfree: @@ -1889,7 +1909,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index rdev->np_port = rswitch_get_port_node(rdev); rdev->disabled = !rdev->np_port; err = of_get_ethdev_address(rdev->np_port, ndev); - of_node_put(rdev->np_port); if (err) { if (is_valid_ether_addr(rdev->etha->mac_addr)) eth_hw_addr_set(ndev, rdev->etha->mac_addr); @@ -1919,6 +1938,7 @@ out_txdmac: out_rxdmac: out_get_params: + of_node_put(rdev->np_port); netif_napi_del(&rdev->napi); free_netdev(ndev); @@ -1932,6 +1952,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index rswitch_txdmac_free(ndev); rswitch_rxdmac_free(ndev); + of_node_put(rdev->np_port); netif_napi_del(&rdev->napi); free_netdev(ndev); } diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 72e3ff596d31..e020800dcc57 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -724,13 +724,13 @@ enum rswitch_etha_mode { #define EAVCC_VEM_SC_TAG (0x3 << 16) -#define MPIC_PIS_MII 0x00 -#define MPIC_PIS_GMII 0x02 -#define MPIC_PIS_XGMII 0x04 -#define MPIC_LSC_SHIFT 3 -#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT) -#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT) -#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT) +#define 
MPIC_PIS GENMASK(2, 0) +#define MPIC_PIS_GMII 2 +#define MPIC_PIS_XGMII 4 +#define MPIC_LSC GENMASK(5, 3) +#define MPIC_LSC_100M 1 +#define MPIC_LSC_1G 2 +#define MPIC_LSC_2_5G 3 #define MDIO_READ_C45 0x03 #define MDIO_WRITE_C45 0x01 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9b262cdad60b..c81ea8cdfe6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4192,8 +4192,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_txq_stats *txq_stats; struct stmmac_tx_queue *tx_q; u32 pay_len, mss, queue; + dma_addr_t tso_des, des; u8 proto_hdr_len, hdr; - dma_addr_t des; bool set_ic; int i; @@ -4289,14 +4289,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; + tso_des = des; } else { stmmac_set_desc_addr(priv, first, des); tmp_pay_len = pay_len; - des += proto_hdr_len; + tso_des = des + proto_hdr_len; pay_len = 0; } - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue); /* In case two or more DMA transmit descriptors are allocated for this * non-paged SKB data, the DMA buffer address should be saved to diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index a1b27b69f010..69ea2c3c76bf 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -983,7 +983,8 @@ static void team_port_disable(struct team *team, #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_HIGHDMA | NETIF_F_LRO) + NETIF_F_HIGHDMA | NETIF_F_LRO | \ + NETIF_F_GSO_ENCAP_ALL) #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) @@ -991,13 +992,14 @@ static void team_port_disable(struct team *team, static void __team_compute_features(struct team *team) { struct team_port *port; - netdev_features_t vlan_features = TEAM_VLAN_FEATURES & - NETIF_F_ALL_FOR_ALL; + netdev_features_t vlan_features = TEAM_VLAN_FEATURES; netdev_features_t enc_features = TEAM_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; + vlan_features = netdev_base_features(vlan_features); + rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) { vlan_features = netdev_increment_features(vlan_features, @@ -2011,8 +2013,7 @@ static netdev_features_t team_fix_features(struct net_device *dev, netdev_features_t mask; mask = features; - features &= ~NETIF_F_ONE_FOR_ALL; - features |= NETIF_F_ALL_FOR_ALL; + features = netdev_base_features(features); rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) { diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 0c011d8f5d4d..9fe7f704a2f7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1365,6 +1365,9 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 
0)}, /* Telit FE910C04 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 64c87bb48a41..7646ddd9bef7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -503,6 +503,7 @@ struct virtio_net_common_hdr { static struct virtio_net_common_hdr xsk_hdr; static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); +static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq); static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, struct net_device *dev, unsigned int *xdp_xmit, @@ -3054,7 +3055,6 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) if (err < 0) goto err_xdp_reg_mem_model; - netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index)); virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); @@ -3332,7 +3332,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi, virtnet_rx_pause(vi, rq); - err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); + err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL); if (err) netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); @@ -3395,7 +3395,8 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, virtnet_tx_pause(vi, sq); - err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); + err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, + virtnet_sq_free_unused_buf_done); if (err) netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); @@ -5710,7 +5711,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu virtnet_rx_pause(vi, rq); - err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf); + err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL); if (err) { netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); @@ -5739,7 +5740,8 @@ static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi, virtnet_tx_pause(vi, sq); - err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf); + err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, + virtnet_sq_free_unused_buf_done); if (err) { netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); pool = NULL; @@ -6214,7 +6216,7 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) { struct virtnet_info *vi = vq->vdev->priv; struct send_queue *sq; - int i = vq2rxq(vq); + int i = vq2txq(vq); sq = &vi->sq[i]; @@ -6234,6 +6236,14 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) } } +static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq) +{ + struct virtnet_info *vi = vq->vdev->priv; + int i = vq2txq(vq); + + netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); +} + static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -6966,11 +6976,20 @@ free: static void remove_vq_common(struct virtnet_info *vi) { + int i; + virtio_reset_device(vi->vdev); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); + /* + * Rule of thumb is netdev_tx_reset_queue() should follow any + * skb freeing not followed by netdev_tx_completed_queue() + */ + for (i = 0; i < vi->max_queue_pairs; i++) + netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); + free_receive_bufs(vi); free_receive_page_frags(vi); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 2a13d70da46c..51ee62ae70fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1972,7 +1972,7 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, if (csa_err_mask & (CS_ERR_COUNT_ERROR | CS_ERR_LONG_DELAY_AFTER_CS | CS_ERR_TX_BLOCK_TIMER_EXPIRED)) - ieee80211_channel_switch_disconnect(vif, true); + ieee80211_channel_switch_disconnect(vif); rcu_read_unlock(); } diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c index 617c8d6706d3..6cea4fe39bcf 100644 --- a/drivers/ptp/ptp_kvm_x86.c +++ b/drivers/ptp/ptp_kvm_x86.c @@ -26,7 +26,7 @@ int kvm_arch_ptp_init(void) long ret; if (!kvm_para_available()) - return -ENODEV; + return -EOPNOTSUPP; if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { p = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -46,14 +46,14 @@ int kvm_arch_ptp_init(void) clock_pair_gpa = slow_virt_to_phys(clock_pair); if (!pvclock_get_pvti_cpu0_va()) { - ret = -ENODEV; + ret = -EOPNOTSUPP; goto err; } ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, KVM_CLOCK_PAIRING_WALLCLOCK); if (ret == -KVM_ENOSYS) { - ret = -ENODEV; + ret = -EOPNOTSUPP; goto err; } diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index e3cc59b82ea6..dca99cfb7cbb 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -371,8 +371,8 @@ .ops = &axp20x_ops, \ } -#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \ - _vmask, _ereg, _emask) \ +#define AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \ + _vmask, _ereg, _emask, _ramp_delay) \ [_family##_##_id] = { \ .name = (_match), \ .supply_name = (_supply), \ @@ -388,9 +388,15 @@ .vsel_mask = (_vmask), \ .enable_reg = (_ereg), \ .enable_mask = (_emask), \ + .ramp_delay = (_ramp_delay), \ .ops = &axp20x_ops, \ } +#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \ + _vmask, _ereg, _emask) \ + AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \ + _vmask, _ereg, _emask, 0) + #define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \ [_family##_##_id] = { \ .name = (_match), \ @@ -419,8 +425,8 @@ .ops = &axp20x_ops_fixed \ } -#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages, \ - _vreg, _vmask, _ereg, _emask) \ +#define AXP_DESC_RANGES_DELAY(_family, _id, _match, _supply, _ranges, _n_voltages, \ + _vreg, _vmask, _ereg, _emask, _ramp_delay) \ [_family##_##_id] = { \ .name = (_match), \ .supply_name = (_supply), \ @@ -436,9 +442,15 @@ .enable_mask = (_emask), \ .linear_ranges = (_ranges), \ .n_linear_ranges = ARRAY_SIZE(_ranges), \ + .ramp_delay = (_ramp_delay), \ .ops = &axp20x_ops_range, \ } +#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages, \ + _vreg, _vmask, _ereg, _emask) \ + AXP_DESC_RANGES_DELAY(_family, _id, _match, _supply, _ranges, \ + _n_voltages, _vreg, _vmask, _ereg, _emask, 0) + static const int axp209_dcdc2_ldo3_slew_rates[] = { 1600, 800, @@ -781,21 +793,21 @@ static const struct linear_range 
axp717_dcdc3_ranges[] = { }; static const struct regulator_desc axp717_regulators[] = { - AXP_DESC_RANGES(AXP717, DCDC1, "dcdc1", "vin1", + AXP_DESC_RANGES_DELAY(AXP717, DCDC1, "dcdc1", "vin1", axp717_dcdc1_ranges, AXP717_DCDC1_NUM_VOLTAGES, AXP717_DCDC1_CONTROL, AXP717_DCDC_V_OUT_MASK, - AXP717_DCDC_OUTPUT_CONTROL, BIT(0)), - AXP_DESC_RANGES(AXP717, DCDC2, "dcdc2", "vin2", + AXP717_DCDC_OUTPUT_CONTROL, BIT(0), 640), + AXP_DESC_RANGES_DELAY(AXP717, DCDC2, "dcdc2", "vin2", axp717_dcdc2_ranges, AXP717_DCDC2_NUM_VOLTAGES, AXP717_DCDC2_CONTROL, AXP717_DCDC_V_OUT_MASK, - AXP717_DCDC_OUTPUT_CONTROL, BIT(1)), - AXP_DESC_RANGES(AXP717, DCDC3, "dcdc3", "vin3", + AXP717_DCDC_OUTPUT_CONTROL, BIT(1), 640), + AXP_DESC_RANGES_DELAY(AXP717, DCDC3, "dcdc3", "vin3", axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES, AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK, - AXP717_DCDC_OUTPUT_CONTROL, BIT(2)), - AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100, + AXP717_DCDC_OUTPUT_CONTROL, BIT(2), 640), + AXP_DESC_DELAY(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100, AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK, - AXP717_DCDC_OUTPUT_CONTROL, BIT(3)), + AXP717_DCDC_OUTPUT_CONTROL, BIT(3), 6400), AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100, AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(0)), diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c index 8eb843ddb25f..e9beae95dded 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -239,7 +239,7 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip, ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode); if (ret < 0) - return ret; + goto stop_user; if (op->dummy.buswidth && op->dummy.nbytes) { for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++) @@ -249,8 +249,9 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip, aspeed_spi_set_io_mode(chip, io_mode); aspeed_spi_read_from_ahb(buf, chip->ahb_base, len); +stop_user: aspeed_spi_stop_user(chip); - return 0; + return ret; } static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, @@ -261,10 +262,11 @@ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, aspeed_spi_start_user(chip); ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode); if (ret < 0) - return ret; + goto stop_user; aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes); +stop_user: aspeed_spi_stop_user(chip); - return 0; + return ret; } /* support for 1-1-1, 1-1-2 or 1-1-4 */ diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 0b45b7b2b3ab..a031ecb358e0 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -43,6 +43,7 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX); #define CQSPI_SLOW_SRAM BIT(4) #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) #define CQSPI_RD_NO_IRQ BIT(6) +#define CQSPI_DISABLE_STIG_MODE BIT(7) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) @@ -103,6 +104,7 @@ struct cqspi_st { bool apb_ahb_hazard; bool is_jh7110; /* Flag for StarFive JH7110 SoC */ + bool disable_stig_mode; const struct cqspi_driver_platdata *ddata; }; @@ -1416,7 +1418,8 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op) * reads, prefer STIG mode for such small reads. 
*/ if (!op->addr.nbytes || - op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX) + (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX && + !cqspi->disable_stig_mode)) return cqspi_command_read(f_pdata, op); return cqspi_read(f_pdata, op); @@ -1880,6 +1883,8 @@ static int cqspi_probe(struct platform_device *pdev) if (ret) goto probe_reset_failed; } + if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) + cqspi->disable_stig_mode = true; if (of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-ospi-1.0")) { @@ -2043,7 +2048,8 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = { static const struct cqspi_driver_platdata socfpga_qspi = { .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION - | CQSPI_SLOW_SRAM, + | CQSPI_SLOW_SRAM + | CQSPI_DISABLE_STIG_MODE, }; static const struct cqspi_driver_platdata versal_ospi = { diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 864e58167417..1bc012fce7cb 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -241,6 +241,20 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable) struct spi_controller *ctlr = spi->controller; struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable; + bool cs_actual; + + /* + * SPI subsystem tries to avoid no-op calls that would break the PM + * refcount below. It can't however for the first time it is used. + * To detect this case we read it here and bail out early for no-ops. + */ + if (spi_get_csgpiod(spi, 0)) + cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & 1); + else + cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & + BIT(spi_get_chipselect(spi, 0))); + if (unlikely(cs_actual == cs_asserted)) + return; if (cs_asserted) { /* Keep things powered as long as CS is asserted */ diff --git a/drivers/staging/gpib/Kconfig b/drivers/staging/gpib/Kconfig index 95308d15a555..259f3ff33646 100644 --- a/drivers/staging/gpib/Kconfig +++ b/drivers/staging/gpib/Kconfig @@ -50,6 +50,7 @@ config GPIB_CEC_PCI tristate "CEC PCI board" depends on PCI depends on HAS_IOPORT + depends on !X86_PAE select GPIB_COMMON select GPIB_NEC7210 help @@ -62,6 +63,8 @@ config GPIB_CEC_PCI config GPIB_NI_PCI_ISA tristate "NI PCI/ISA compatible boards" depends on ISA_BUS || PCI || PCMCIA + depends on HAS_IOPORT + depends on !X86_PAE select GPIB_COMMON select GPIB_NEC7210 help @@ -85,6 +88,7 @@ config GPIB_CB7210 tristate "Measurement Computing compatible boards" depends on HAS_IOPORT depends on ISA_BUS || PCI || PCMCIA + depends on !X86_PAE select GPIB_COMMON select GPIB_NEC7210 help @@ -128,7 +132,7 @@ config GPIB_FMH tristate "FMH FPGA based devices" select GPIB_COMMON select GPIB_NEC7210 - depends on BROKEN + depends on !PPC depends on OF && PCI help GPIB driver for fmhess FPGA based devices @@ -174,6 +178,7 @@ config GPIB_INES tristate "INES" depends on PCI || ISA_BUS || PCMCIA depends on HAS_IOPORT + depends on !X86_PAE select GPIB_COMMON select GPIB_NEC7210 help diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c index 796c3a5be545..267651a15fa0 100644 --- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c +++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c @@ -901,7 +901,7 @@ static int usb_gpib_read(gpib_board_t *board, } else { /* we are in the closing <DLE><ETX> sequence */ - + c = nc; if (c == ETX) { c = one_char(board, &b); if (c == ACK) { diff --git a/drivers/tty/serial/8250/8250_port.c 
b/drivers/tty/serial/8250/8250_port.c index 4d63d80e78a9..649e74e9b52f 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -467,7 +467,8 @@ static void set_io_from_upio(struct uart_port *p) break; #endif default: - WARN(1, "Unsupported UART type %x\n", p->iotype); + WARN(p->iotype != UPIO_PORT || p->iobase, + "Unsupported UART type %x\n", p->iotype); p->serial_in = no_serial_in; p->serial_out = no_serial_out; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index df523c744423..924b803af440 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -157,6 +157,7 @@ struct sci_port { bool has_rtscts; bool autorts; + bool tx_occurred; }; #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS @@ -850,6 +851,7 @@ static void sci_transmit_chars(struct uart_port *port) { struct tty_port *tport = &port->state->port; unsigned int stopped = uart_tx_stopped(port); + struct sci_port *s = to_sci_port(port); unsigned short status; unsigned short ctrl; int count; @@ -885,6 +887,7 @@ static void sci_transmit_chars(struct uart_port *port) } sci_serial_out(port, SCxTDR, c); + s->tx_occurred = true; port->icount.tx++; } while (--count > 0); @@ -1241,6 +1244,8 @@ static void sci_dma_tx_complete(void *arg) if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); + s->tx_occurred = true; + if (!kfifo_is_empty(&tport->xmit_fifo)) { s->cookie_tx = 0; schedule_work(&s->work_tx); @@ -1731,6 +1736,19 @@ static void sci_flush_buffer(struct uart_port *port) s->cookie_tx = -EINVAL; } } + +static void sci_dma_check_tx_occurred(struct sci_port *s) +{ + struct dma_tx_state state; + enum dma_status status; + + if (!s->chan_tx) + return; + + status = dmaengine_tx_status(s->chan_tx, s->cookie_tx, &state); + if (status == DMA_COMPLETE || status == DMA_IN_PROGRESS) + s->tx_occurred = true; +} #else /* !CONFIG_SERIAL_SH_SCI_DMA */ static inline void sci_request_dma(struct uart_port *port) { @@ -1740,6 +1758,10 @@ static inline void sci_free_dma(struct uart_port *port) { } +static void sci_dma_check_tx_occurred(struct sci_port *s) +{ +} + #define sci_flush_buffer NULL #endif /* !CONFIG_SERIAL_SH_SCI_DMA */ @@ -2076,6 +2098,12 @@ static unsigned int sci_tx_empty(struct uart_port *port) { unsigned short status = sci_serial_in(port, SCxSR); unsigned short in_tx_fifo = sci_txfill(port); + struct sci_port *s = to_sci_port(port); + + sci_dma_check_tx_occurred(s); + + if (!s->tx_occurred) + return TIOCSER_TEMT; return (status & SCxSR_TEND(port)) && !in_tx_fifo ? 
TIOCSER_TEMT : 0; } @@ -2247,6 +2275,7 @@ static int sci_startup(struct uart_port *port) dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); + s->tx_occurred = false; sci_request_dma(port); ret = sci_request_irq(s); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index b7ec5797d5ba..8a01e4393159 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -5556,6 +5556,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, lrbp = &hba->lrb[task_tag]; lrbp->compl_time_stamp = ktime_get(); + lrbp->compl_time_stamp_local_clock = local_clock(); cmd = lrbp->cmd; if (cmd) { if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 500dc35e6477..0b2490347b9f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2794,8 +2794,14 @@ int usb_add_hcd(struct usb_hcd *hcd, int retval; struct usb_device *rhdev; struct usb_hcd *shared_hcd; + int skip_phy_initialization; - if (!hcd->skip_phy_initialization) { + if (usb_hcd_is_primary_hcd(hcd)) + skip_phy_initialization = hcd->skip_phy_initialization; + else + skip_phy_initialization = hcd->primary_hcd->skip_phy_initialization; + + if (!skip_phy_initialization) { if (usb_hcd_is_primary_hcd(hcd)) { hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); if (IS_ERR(hcd->phy_roothub)) diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index cb54390e7de4..8c3941ecaaf5 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -3546,11 +3546,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, port_status |= USB_PORT_STAT_C_OVERCURRENT << 16; } - if (!hsotg->flags.b.port_connect_status) { + if (dwc2_is_device_mode(hsotg)) { /* - * The port is disconnected, which means the core is - * either in device mode or it soon will be. Just - * return 0's for the remainder of the port status + * Just return 0's for the remainder of the port status * since the port register can't be read if the core * is in device mode. */ @@ -3620,13 +3618,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1)) goto error; - if (!hsotg->flags.b.port_connect_status) { + if (dwc2_is_device_mode(hsotg)) { /* - * The port is disconnected, which means the core is - * either in device mode or it soon will be. Just - * return without doing anything since the port - * register can't be written if the core is in device - * mode. + * Just return 0's for the remainder of the port status + * since the port register can't be read if the core + * is in device mode. */ break; } @@ -4349,7 +4345,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (hsotg->bus_suspended) goto skip_power_saving; - if (hsotg->flags.b.port_connect_status == 0) + if (!(dwc2_read_hprt0(hsotg) & HPRT0_CONNSTS)) goto skip_power_saving; switch (hsotg->params.power_down) { @@ -4431,6 +4427,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd) * Power Down mode. 
*/ if (hprt0 & HPRT0_CONNSTS) { + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); hsotg->lx_state = DWC2_L0; goto unlock; } diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c index 356812cbcd88..3edc5aca76f9 100644 --- a/drivers/usb/dwc3/dwc3-imx8mp.c +++ b/drivers/usb/dwc3/dwc3-imx8mp.c @@ -129,6 +129,16 @@ static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx) writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL); } +static const struct property_entry dwc3_imx8mp_properties[] = { + PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk"), + PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk"), + {}, +}; + +static const struct software_node dwc3_imx8mp_swnode = { + .properties = dwc3_imx8mp_properties, +}; + static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx) { struct dwc3_imx8mp *dwc3_imx = _dwc3_imx; @@ -148,17 +158,6 @@ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx) return IRQ_HANDLED; } -static int dwc3_imx8mp_set_software_node(struct device *dev) -{ - struct property_entry props[3] = { 0 }; - int prop_idx = 0; - - props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk"); - props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk"); - - return device_create_managed_software_node(dev, props, NULL); -} - static int dwc3_imx8mp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -221,17 +220,17 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) if (err < 0) goto disable_rpm; - err = dwc3_imx8mp_set_software_node(dev); + err = device_add_software_node(dev, &dwc3_imx8mp_swnode); if (err) { err = -ENODEV; - dev_err(dev, "failed to create software node\n"); + dev_err(dev, "failed to add software node\n"); goto disable_rpm; } err = of_platform_populate(node, NULL, NULL, dev); if (err) { dev_err(&pdev->dev, "failed to create dwc3 core\n"); - goto disable_rpm; + goto remove_swnode; } dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np); @@ -255,6 +254,8 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) depopulate: of_platform_depopulate(dev); +remove_swnode: + device_remove_software_node(dev); disable_rpm: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); @@ -268,6 +269,7 @@ static void dwc3_imx8mp_remove(struct platform_device *pdev) pm_runtime_get_sync(dev); of_platform_depopulate(dev); + device_remove_software_node(dev); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index e3738e1610db..a33a42ba0249 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -121,8 +121,11 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) * in use but the usb3-phy entry is missing from the device tree. * Therefore, skip these operations in this case. 
*/ - if (!priv_data->usb3_phy) + if (!priv_data->usb3_phy) { + /* Deselect the PIPE Clock Select bit in FPD PIPE Clock register */ + writel(PIPE_CLK_DESELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK); goto skip_usb3_phy; + } crst = devm_reset_control_get_exclusive(dev, "usb_crst"); if (IS_ERR(crst)) { diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c index ee3d9e3578f7..12e866fb311d 100644 --- a/drivers/usb/gadget/function/f_midi2.c +++ b/drivers/usb/gadget/function/f_midi2.c @@ -1591,7 +1591,11 @@ static int f_midi2_create_card(struct f_midi2 *midi2) fb->info.midi_ci_version = b->midi_ci_version; fb->info.ui_hint = reverse_dir(b->ui_hint); fb->info.sysex8_streams = b->sysex8_streams; - fb->info.flags |= b->is_midi1; + if (b->is_midi1 < 2) + fb->info.flags |= b->is_midi1; + else + fb->info.flags |= SNDRV_UMP_BLOCK_IS_MIDI1 | + SNDRV_UMP_BLOCK_IS_LOWSPEED; strscpy(fb->info.name, ump_fb_name(b), sizeof(fb->info.name)); } diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 0a8c05b2746b..53d9fc41acc5 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -579,9 +579,12 @@ static int gs_start_io(struct gs_port *port) * we didn't in gs_start_tx() */ tty_wakeup(port->port.tty); } else { - gs_free_requests(ep, head, &port->read_allocated); - gs_free_requests(port->port_usb->in, &port->write_pool, - &port->write_allocated); + /* Free reqs only if we are still connected */ + if (port->port_usb) { + gs_free_requests(ep, head, &port->read_allocated); + gs_free_requests(port->port_usb->in, &port->write_pool, + &port->write_allocated); + } status = -EIO; } diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index 5d0d972fb7b1..2d23690d72c5 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c @@ -119,8 +119,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) if (IS_ERR(priv->iclk)) priv->iclk = NULL; - clk_enable(priv->fclk); - clk_enable(priv->iclk); + ret = clk_enable(priv->fclk); + if (ret) + goto fail_request_resource; + ret = clk_enable(priv->iclk); + if (ret) + goto fail_iclk; ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret != 0) { @@ -136,6 +140,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) fail_add_hcd: clk_disable(priv->iclk); +fail_iclk: clk_disable(priv->fclk); fail_request_resource: diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 9fe4f48b1898..0881fdd1823e 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -779,11 +779,17 @@ max3421_check_unlink(struct usb_hcd *hcd) retval = 1; dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", __func__, urb, urb->unlinked); - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&max3421_hcd->lock, - flags); - usb_hcd_giveback_urb(hcd, urb, 0); - spin_lock_irqsave(&max3421_hcd->lock, flags); + if (urb == max3421_hcd->curr_urb) { + max3421_hcd->urb_done = 1; + max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) | + BIT(MAX3421_HI_RCVDAV_BIT)); + } else { + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&max3421_hcd->lock, + flags); + usb_hcd_giveback_urb(hcd, urb, 0); + spin_lock_irqsave(&max3421_hcd->lock, flags); + } } } } diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c index 36b11127280f..75ac3c6aa92d 100644 --- a/drivers/usb/misc/onboard_usb_dev.c +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -407,8 +407,10 @@ static int 
onboard_dev_probe(struct platform_device *pdev) } if (of_device_is_compatible(pdev->dev.of_node, "usb424,2744") || - of_device_is_compatible(pdev->dev.of_node, "usb424,5744")) + of_device_is_compatible(pdev->dev.of_node, "usb424,5744")) { err = onboard_dev_5744_i2c_init(client); + onboard_dev->always_powered_in_suspend = true; + } put_device(&client->dev); if (err < 0) diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c index d1e7c487ddfb..0ae0a5ee3fae 100644 --- a/drivers/usb/typec/anx7411.c +++ b/drivers/usb/typec/anx7411.c @@ -290,6 +290,8 @@ struct anx7411_data { struct power_supply *psy; struct power_supply_desc psy_desc; struct device *dev; + struct fwnode_handle *switch_node; + struct fwnode_handle *mux_node; }; static u8 snk_identity[] = { @@ -1021,6 +1023,16 @@ static void anx7411_port_unregister_altmodes(struct typec_altmode **adev) } } +static void anx7411_port_unregister(struct typec_params *typecp) +{ + fwnode_handle_put(typecp->caps.fwnode); + anx7411_port_unregister_altmodes(typecp->port_amode); + if (typecp->port) + typec_unregister_port(typecp->port); + if (typecp->role_sw) + usb_role_switch_put(typecp->role_sw); +} + static int anx7411_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state) { @@ -1089,6 +1101,7 @@ static void anx7411_unregister_mux(struct anx7411_data *ctx) if (ctx->typec.typec_mux) { typec_mux_unregister(ctx->typec.typec_mux); ctx->typec.typec_mux = NULL; + fwnode_handle_put(ctx->mux_node); } } @@ -1097,6 +1110,7 @@ static void anx7411_unregister_switch(struct anx7411_data *ctx) if (ctx->typec.typec_switch) { typec_switch_unregister(ctx->typec.typec_switch); ctx->typec.typec_switch = NULL; + fwnode_handle_put(ctx->switch_node); } } @@ -1104,28 +1118,29 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx, struct device *dev) { int ret; - struct device_node *node; - node = of_get_child_by_name(dev->of_node, "orientation_switch"); - if (!node) + ctx->switch_node = device_get_named_child_node(dev, "orientation_switch"); + if (!ctx->switch_node) return 0; - ret = anx7411_register_switch(ctx, dev, &node->fwnode); + ret = anx7411_register_switch(ctx, dev, ctx->switch_node); if (ret) { dev_err(dev, "failed register switch"); + fwnode_handle_put(ctx->switch_node); return ret; } - node = of_get_child_by_name(dev->of_node, "mode_switch"); - if (!node) { + ctx->mux_node = device_get_named_child_node(dev, "mode_switch"); + if (!ctx->mux_node) { dev_err(dev, "no typec mux exist"); ret = -ENODEV; goto unregister_switch; } - ret = anx7411_register_mux(ctx, dev, &node->fwnode); + ret = anx7411_register_mux(ctx, dev, ctx->mux_node); if (ret) { dev_err(dev, "failed register mode switch"); + fwnode_handle_put(ctx->mux_node); ret = -ENODEV; goto unregister_switch; } @@ -1154,34 +1169,34 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, ret = fwnode_property_read_string(fwnode, "power-role", &buf); if (ret) { dev_err(dev, "power-role not found: %d\n", ret); - return ret; + goto put_fwnode; } ret = typec_find_port_power_role(buf); if (ret < 0) - return ret; + goto put_fwnode; cap->type = ret; ret = fwnode_property_read_string(fwnode, "data-role", &buf); if (ret) { dev_err(dev, "data-role not found: %d\n", ret); - return ret; + goto put_fwnode; } ret = typec_find_port_data_role(buf); if (ret < 0) - return ret; + goto put_fwnode; cap->data = ret; ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); if (ret) { dev_err(dev, "try-power-role not found: %d\n", ret); - return ret; + goto put_fwnode; } ret = 
typec_find_power_role(buf); if (ret < 0) - return ret; + goto put_fwnode; cap->prefer_role = ret; /* Get source pdos */ @@ -1193,7 +1208,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, typecp->src_pdo_nr); if (ret < 0) { dev_err(dev, "source cap validate failed: %d\n", ret); - return -EINVAL; + goto put_fwnode; } typecp->caps_flags |= HAS_SOURCE_CAP; @@ -1207,7 +1222,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, typecp->sink_pdo_nr); if (ret < 0) { dev_err(dev, "sink cap validate failed: %d\n", ret); - return -EINVAL; + goto put_fwnode; } for (i = 0; i < typecp->sink_pdo_nr; i++) { @@ -1251,13 +1266,21 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, ret = PTR_ERR(ctx->typec.port); ctx->typec.port = NULL; dev_err(dev, "Failed to register type c port %d\n", ret); - return ret; + goto put_usb_role_switch; } typec_port_register_altmodes(ctx->typec.port, NULL, ctx, ctx->typec.port_amode, MAX_ALTMODE); return 0; + +put_usb_role_switch: + if (ctx->typec.role_sw) + usb_role_switch_put(ctx->typec.role_sw); +put_fwnode: + fwnode_handle_put(fwnode); + + return ret; } static int anx7411_typec_check_connection(struct anx7411_data *ctx) @@ -1523,8 +1546,7 @@ free_wq: destroy_workqueue(plat->workqueue); free_typec_port: - typec_unregister_port(plat->typec.port); - anx7411_port_unregister_altmodes(plat->typec.port_amode); + anx7411_port_unregister(&plat->typec); free_typec_switch: anx7411_unregister_switch(plat); @@ -1548,17 +1570,11 @@ static void anx7411_i2c_remove(struct i2c_client *client) i2c_unregister_device(plat->spi_client); - if (plat->typec.role_sw) - usb_role_switch_put(plat->typec.role_sw); - anx7411_unregister_mux(plat); anx7411_unregister_switch(plat); - if (plat->typec.port) - typec_unregister_port(plat->typec.port); - - anx7411_port_unregister_altmodes(plat->typec.port_amode); + anx7411_port_unregister(&plat->typec); } static const struct i2c_device_id anx7411_id[] = { diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index c435c0835744..fcf499cc9458 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -46,11 +46,11 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci) ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci)); if (cci & UCSI_CCI_ACK_COMPLETE && - test_bit(ACK_PENDING, &ucsi->flags)) + test_and_clear_bit(ACK_PENDING, &ucsi->flags)) complete(&ucsi->complete); if (cci & UCSI_CCI_COMMAND_COMPLETE && - test_bit(COMMAND_PENDING, &ucsi->flags)) + test_and_clear_bit(COMMAND_PENDING, &ucsi->flags)) complete(&ucsi->complete); } EXPORT_SYMBOL_GPL(ucsi_notify_common); @@ -65,6 +65,8 @@ int ucsi_sync_control_common(struct ucsi *ucsi, u64 command) else set_bit(COMMAND_PENDING, &ucsi->flags); + reinit_completion(&ucsi->complete); + ret = ucsi->ops->async_control(ucsi, command); if (ret) goto out_clear_bit; @@ -651,7 +653,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient) static int ucsi_get_connector_status(struct ucsi_connector *con, bool conn_ack) { u64 command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num); - size_t size = min(UCSI_GET_CONNECTOR_STATUS_SIZE, UCSI_MAX_DATA_LENGTH(con->ucsi)); + size_t size = min(sizeof(con->status), + UCSI_MAX_DATA_LENGTH(con->ucsi)); int ret; ret = ucsi_send_command_common(con->ucsi, command, &con->status, size, conn_ack); diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index 7527e277c898..eb7387ee6ebd 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c 
@@ -1517,7 +1517,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev, struct mlx5_vhca_qp *host_qp; struct mlx5_vhca_qp *fw_qp; struct mlx5_core_dev *mdev; - u32 max_msg_size = PAGE_SIZE; + u32 log_max_msg_size; + u32 max_msg_size; u64 rq_size = SZ_2M; u32 max_recv_wr; int err; @@ -1534,6 +1535,12 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev, } mdev = mvdev->mdev; + log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size); + max_msg_size = (1ULL << log_max_msg_size); + /* The RQ must hold at least 4 WQEs/messages for successful QP creation */ + if (rq_size < 4 * max_msg_size) + rq_size = 4 * max_msg_size; + memset(tracker, 0, sizeof(*tracker)); tracker->uar = mlx5_get_uars_page(mdev); if (IS_ERR(tracker->uar)) { @@ -1623,25 +1630,41 @@ set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp, { u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry); u32 nent = size / entry_size; + u32 nent_in_page; + u32 nent_to_set; struct page *page; + u32 page_offset; + u32 page_index; + u32 buf_offset; + void *kaddr; u64 addr; u64 *buf; int i; - if (WARN_ON(index >= qp->recv_buf.npages || + buf_offset = index * qp->max_msg_size; + if (WARN_ON(buf_offset + size >= qp->recv_buf.npages * PAGE_SIZE || (nent > qp->max_msg_size / entry_size))) return; - page = qp->recv_buf.page_list[index]; - buf = kmap_local_page(page); - for (i = 0; i < nent; i++) { - addr = MLX5_GET(page_track_report_entry, buf + i, - dirty_address_low); - addr |= (u64)MLX5_GET(page_track_report_entry, buf + i, - dirty_address_high) << 32; - iova_bitmap_set(dirty, addr, qp->tracked_page_size); - } - kunmap_local(buf); + do { + page_index = buf_offset / PAGE_SIZE; + page_offset = buf_offset % PAGE_SIZE; + nent_in_page = (PAGE_SIZE - page_offset) / entry_size; + page = qp->recv_buf.page_list[page_index]; + kaddr = kmap_local_page(page); + buf = kaddr + page_offset; + nent_to_set = min(nent, nent_in_page); + for (i = 0; i < nent_to_set; i++) { + addr = MLX5_GET(page_track_report_entry, buf + i, + dirty_address_low); + addr |= (u64)MLX5_GET(page_track_report_entry, buf + i, + dirty_address_high) << 32; + iova_bitmap_set(dirty, addr, qp->tracked_page_size); + } + kunmap_local(kaddr); + buf_offset += (nent_to_set * entry_size); + nent -= nent_to_set; + } while (nent); } static void diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 82a7d2cbc704..fdd2d2b07b5a 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2772,6 +2772,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); * @_vq: the struct virtqueue we're talking about. * @num: new ring num * @recycle: callback to recycle unused buffers + * @recycle_done: callback to be invoked when recycle for all unused buffers done * * When it is really necessary to create a new vring, it will set the current vq * into the reset state. 
Then call the passed callback to recycle the buffer
@@ -2792,7 +2793,8 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
 *
 */
 int virtqueue_resize(struct virtqueue *_vq, u32 num,
-                     void (*recycle)(struct virtqueue *vq, void *buf))
+                     void (*recycle)(struct virtqueue *vq, void *buf),
+                     void (*recycle_done)(struct virtqueue *vq))
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         int err;
@@ -2809,6 +2811,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
         err = virtqueue_disable_and_recycle(_vq, recycle);
         if (err)
                 return err;
+        if (recycle_done)
+                recycle_done(_vq);
 
         if (vq->packed_ring)
                 err = virtqueue_resize_packed(_vq, num);
@@ -2823,6 +2827,7 @@ EXPORT_SYMBOL_GPL(virtqueue_resize);
 * virtqueue_reset - detach and recycle all unused buffers
 * @_vq: the struct virtqueue we're talking about.
 * @recycle: callback to recycle unused buffers
+ * @recycle_done: callback to be invoked when recycle for all unused buffers done
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
@@ -2834,7 +2839,8 @@ EXPORT_SYMBOL_GPL(virtqueue_resize);
 * -EPERM: Operation not permitted
 */
 int virtqueue_reset(struct virtqueue *_vq,
-                    void (*recycle)(struct virtqueue *vq, void *buf))
+                    void (*recycle)(struct virtqueue *vq, void *buf),
+                    void (*recycle_done)(struct virtqueue *vq))
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         int err;
@@ -2842,6 +2848,8 @@ int virtqueue_reset(struct virtqueue *_vq,
         err = virtqueue_disable_and_recycle(_vq, recycle);
         if (err)
                 return err;
+        if (recycle_done)
+                recycle_done(_vq);
 
         if (vq->packed_ring)
                 virtqueue_reinit_packed(vq);
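[Editor's note: illustrative sketch, not part of the merged diff.] The virtio_net hunks above move netdev_tx_reset_queue() out of virtnet_enable_queue_pair() and into the new recycle_done callback (virtnet_sq_free_unused_buf_done()), following the rule quoted in remove_vq_common(): skb freeing that is not paired with netdev_tx_completed_queue() must be followed by netdev_tx_reset_queue(). A minimal driver-side use of the extended virtqueue_resize() signature could look like this; struct drv_priv, the drv_* names and the single-queue assumption are invented for illustration:

/* Hypothetical example; drv_* names and the single TX queue are assumptions. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/virtio.h>

struct drv_priv {
        struct net_device *ndev;                /* assumed driver state */
};

/* Called once per unused buffer: free the skb, no BQL completion here. */
static void drv_recycle(struct virtqueue *vq, void *buf)
{
        dev_kfree_skb_any(buf);
}

/* Called exactly once, after every unused buffer has been recycled. */
static void drv_recycle_done(struct virtqueue *vq)
{
        struct drv_priv *priv = vq->vdev->priv;

        /*
         * skbs were freed without netdev_tx_completed_queue(), so reset the
         * BQL state before the queue is used again.
         */
        netdev_tx_reset_queue(netdev_get_tx_queue(priv->ndev, 0));
}

static int drv_resize_tx(struct virtqueue *vq, u32 num)
{
        return virtqueue_resize(vq, num, drv_recycle, drv_recycle_done);
}

virtqueue_reset() takes the same callback pair, as the virtnet_sq_bind_xsk_pool() hunk above shows.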
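[Editor's note: illustrative sketch, not part of the merged diff.] The ucsi.c hunk makes completion of a pending command conditional on test_and_clear_bit() and re-arms the completion with reinit_completion() before each new command, so a stale complete() cannot satisfy the next wait. A generic form of that pattern, with an invented drv_ctrl structure whose completion is assumed to be set up with init_completion() at probe, looks roughly like this:

/* Hypothetical example; struct drv_ctrl and drv_* names are assumptions. */
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define DRV_CMD_PENDING         0       /* bit number in flags */

struct drv_ctrl {
        unsigned long flags;
        struct completion done;         /* init_completion() at probe (assumed) */
};

/* Notification path: only the claimant of the pending bit may complete(). */
static void drv_notify_command_complete(struct drv_ctrl *ctrl)
{
        if (test_and_clear_bit(DRV_CMD_PENDING, &ctrl->flags))
                complete(&ctrl->done);
}

/* Submission path: re-arm the completion so a stale complete() is ignored. */
static int drv_send_command(struct drv_ctrl *ctrl)
{
        set_bit(DRV_CMD_PENDING, &ctrl->flags);
        reinit_completion(&ctrl->done);

        /* ... kick the command off in hardware here (assumed) ... */

        if (!wait_for_completion_timeout(&ctrl->done, msecs_to_jiffies(1000))) {
                clear_bit(DRV_CMD_PENDING, &ctrl->flags);
                return -ETIMEDOUT;
        }
        return 0;
}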
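[Editor's note: illustrative sketch, not part of the merged diff.] The reworked set_report_output() in mlx5/cmd.c walks a report that may start at an arbitrary offset in the receive buffer and span several pages, mapping one page at a time with kmap_local_page(). A generic restatement of that loop follows; drv_walk_records() and handle() are invented, and entry_size is assumed to divide PAGE_SIZE so records never straddle a page, as in the mlx5 report layout:

/* Hypothetical example; drv_walk_records() and handle() are assumptions. */
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/types.h>

static void drv_walk_records(struct page **pages, u32 buf_offset, u32 nent,
                             u32 entry_size, void (*handle)(const void *entry))
{
        while (nent) {
                u32 page_index = buf_offset / PAGE_SIZE;
                u32 page_offset = buf_offset % PAGE_SIZE;
                u32 nent_in_page = (PAGE_SIZE - page_offset) / entry_size;
                u32 nent_to_set = min(nent, nent_in_page);
                void *kaddr = kmap_local_page(pages[page_index]);
                u32 i;

                /* handle the records that fit in the currently mapped page */
                for (i = 0; i < nent_to_set; i++)
                        handle(kaddr + page_offset + i * entry_size);

                kunmap_local(kaddr);
                buf_offset += nent_to_set * entry_size;
                nent -= nent_to_set;
        }
}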
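[Editor's note: illustrative sketch, not part of the merged diff.] The anx7411 changes keep the fwnode handles returned by device_get_named_child_node() in the driver state so that fwnode_handle_put() can be called on every error path and again at unregister time. A reduced sketch of that ownership pattern, with an invented drv_register_switch() helper, could be:

/* Hypothetical example; drv_register_switch() and drv_* names are assumptions. */
#include <linux/device.h>
#include <linux/property.h>

static int drv_register_switch(struct device *dev,
                               struct fwnode_handle *node);    /* assumed helper */

static int drv_probe_switch_node(struct device *dev, struct fwnode_handle **out)
{
        struct fwnode_handle *node;
        int ret;

        node = device_get_named_child_node(dev, "orientation_switch");
        if (!node)
                return 0;                       /* optional node, nothing to do */

        ret = drv_register_switch(dev, node);
        if (ret) {
                fwnode_handle_put(node);        /* drop the reference on failure */
                return ret;
        }

        *out = node;    /* keep the reference; fwnode_handle_put() it at remove() */
        return 0;
}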
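[Editor's note: illustrative sketch, not part of the merged diff.] The spi-rockchip.c hunk reads the chip-select enable register back and returns early when the requested state is already programmed, so the runtime-PM get/put pairing around chip-select changes stays balanced. A stripped-down version of that guard, with an assumed register offset and invented drv_* types, might look like:

/* Hypothetical example; DRV_SPI_SER and struct drv_spi are assumptions. */
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#define DRV_SPI_SER     0x18                    /* assumed slave-enable register */

struct drv_spi {
        void __iomem *regs;
        struct device *dev;
};

static void drv_set_cs(struct drv_spi *rs, unsigned int cs, bool assert)
{
        bool cs_actual = !!(readl_relaxed(rs->regs + DRV_SPI_SER) & BIT(cs));

        /* bail out on no-ops so the PM refcount below stays balanced */
        if (cs_actual == assert)
                return;

        if (assert)
                pm_runtime_get_sync(rs->dev);

        /* ... program the chip select in hardware here (assumed) ... */

        if (!assert)
                pm_runtime_put(rs->dev);
}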