author | Jakub Kicinski <kuba@kernel.org> | 2024-06-27 12:12:07 -0700
---|---|---
committer | Jakub Kicinski <kuba@kernel.org> | 2024-06-27 12:14:11 -0700
commit | 193b9b200224dab4eaec407eb2955303a521f5b6 (patch) |
tree | 95366182cc2871667a60613a921f7bfae0b9e2c7 | /drivers
parent | cce346d441399209bd582f1aaf190a7784dd12f8 (diff) |
parent | ffb7aa9fedad817b9fa0d868a3065c4eeb1f68c4 (diff) |
download | lwn-193b9b200224dab4eaec407eb2955303a521f5b6.tar.gz | lwn-193b9b200224dab4eaec407eb2955303a521f5b6.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
No conflicts.
Adjacent changes:
e3f02f32a050 ("ionic: fix kernel panic due to multi-buffer handling")
d9c04209990b ("ionic: Mark error paths in the data path as unlikely")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
106 files changed, 1103 insertions, 431 deletions
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 8907b8bf4267..c49b9f8de723 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function, struct acpi_mem_mapping *mm = mem_info->cur_mm; u32 length; acpi_size map_length; - acpi_size page_boundary_map_length; #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED u32 remainder; #endif @@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function, map_length = (acpi_size) ((mem_info->address + mem_info->length) - address); - /* - * If mapping the entire remaining portion of the region will cross - * a page boundary, just map up to the page boundary, do not cross. - * On some systems, crossing a page boundary while mapping regions - * can cause warnings if the pages have different attributes - * due to resource management. - * - * This has the added benefit of constraining a single mapping to - * one page, which is similar to the original code that used a 4k - * maximum window. - */ - page_boundary_map_length = (acpi_size) - (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address); - if (page_boundary_map_length == 0) { - page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; - } - - if (map_length > page_boundary_map_length) { - map_length = page_boundary_map_length; - } + if (map_length > ACPI_DEFAULT_PAGE_SIZE) + map_length = ACPI_DEFAULT_PAGE_SIZE; /* Create a new mapping starting at the address given */ diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 2a0e9fc7b74c..601b670356e5 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -302,6 +302,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle); void acpi_mipi_scan_crs_csi2(void); void acpi_mipi_init_crs_csi2_swnodes(void); void acpi_mipi_crs_csi2_cleanup(void); +#ifdef CONFIG_X86 bool acpi_graph_ignore_port(acpi_handle handle); +#else +static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; } +#endif #endif /* _ACPI_INTERNAL_H_ */ diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c index d05413a0672a..92b658f92dc0 100644 --- a/drivers/acpi/mipi-disco-img.c +++ b/drivers/acpi/mipi-disco-img.c @@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void) acpi_mipi_del_crs_csi2(csi2); } -static const struct dmi_system_id dmi_ignore_port_nodes[] = { - { - .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"), - }, - }, - { } +#ifdef CONFIG_X86 +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */ +static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = { + X86_MATCH_VFM(INTEL_TIGERLAKE, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL), + {} }; static const char *strnext(const char *s1, const char *s2) @@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle) static bool dmi_tested, ignore_port; if (!dmi_tested) { - ignore_port = dmi_first_match(dmi_ignore_port_nodes); + if (dmi_name_in_vendors("Dell Inc.") && + x86_match_cpu(dell_broken_mipi_disco_cpu_gens)) + ignore_port = true; + dmi_tested = true; } @@ -794,3 +803,4 @@ out_free: kfree(orig_path); return false; } +#endif diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 
07d66d2c5f0d..5eb38fbbbecd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1735,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap) if (ap->pflags & ATA_PFLAG_EXTERNAL) return; + /* If no LPM states are supported by the HBA, do not bother with LPM */ + if ((ap->host->flags & ATA_HOST_NO_PART) && + (ap->host->flags & ATA_HOST_NO_SSC) && + (ap->host->flags & ATA_HOST_NO_DEVSLP)) { + ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n"); + return; + } + /* user modified policy via module param */ if (mobile_lpm_policy != -1) { policy = mobile_lpm_policy; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 002a5ec80620..9fc99cfbef08 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -394,7 +394,7 @@ config LS2X_APB_DMA config MCF_EDMA tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs" - depends on M5441x || COMPILE_TEST + depends on M5441x || (COMPILE_TEST && FSL_EDMA=n) select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 8dc029c86551..fc049c9c9892 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry) spin_unlock(&irq_entry->list_lock); - list_for_each_entry(desc, &flist, list) { + list_for_each_entry_safe(desc, n, &flist, list) { /* * Check against the original status as ABORT is software defined * and 0xff, which DSA_COMP_STATUS_MASK can mask out. */ + list_del(&desc->list); + if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); continue; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 9c364e92cb82..e8f45a7fded4 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -534,18 +534,6 @@ err_out: return err; } -static int ioat_register(struct ioatdma_device *ioat_dma) -{ - int err = dma_async_device_register(&ioat_dma->dma_dev); - - if (err) { - ioat_disable_interrupts(ioat_dma); - dma_pool_destroy(ioat_dma->completion_pool); - } - - return err; -} - static void ioat_dma_remove(struct ioatdma_device *ioat_dma) { struct dma_device *dma = &ioat_dma->dma_dev; @@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); } - err = ioat_register(ioat_dma); + err = dma_async_device_register(&ioat_dma->dma_dev); if (err) - return err; + goto err_disable_interrupts; ioat_kobject_add(ioat_dma, &ioat_ktype); @@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) /* disable relaxed ordering */ err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16); - if (err) - return pcibios_err_to_errno(err); + if (err) { + err = pcibios_err_to_errno(err); + goto err_disable_interrupts; + } /* clear relaxed ordering enable */ val16 &= ~PCI_EXP_DEVCTL_RELAX_EN; err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16); - if (err) - return pcibios_err_to_errno(err); + if (err) { + err = pcibios_err_to_errno(err); + goto err_disable_interrupts; + } if (ioat_dma->cap & IOAT_CAP_DPS) writeb(ioat_pending_level + 1, ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); return 0; + +err_disable_interrupts: + ioat_disable_interrupts(ioat_dma); + dma_pool_destroy(ioat_dma->completion_pool); + return err; } static void ioat_shutdown(struct pci_dev *pdev) @@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem * 
const *iomap; struct device *dev = &pdev->dev; struct ioatdma_device *device; + unsigned int i; + u8 version; int err; err = pcim_enable_device(pdev); @@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!iomap) return -ENOMEM; + version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET); + if (version < IOAT_VER_3_0) + return -ENODEV; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) return err; @@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); pci_set_drvdata(pdev, device); - device->version = readb(device->reg_base + IOAT_VER_OFFSET); + device->version = version; if (device->version >= IOAT_VER_3_4) ioat_dca_enabled = 0; - if (device->version >= IOAT_VER_3_0) { - if (is_skx_ioat(pdev)) - device->version = IOAT_VER_3_2; - err = ioat3_dma_probe(device, ioat_dca_enabled); - } else - return -ENODEV; + if (is_skx_ioat(pdev)) + device->version = IOAT_VER_3_2; + + err = ioat3_dma_probe(device, ioat_dca_enabled); if (err) { + for (i = 0; i < IOAT_MAX_CHANS; i++) + kfree(device->idx[i]); + kfree(device); dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); return -ENODEV; } @@ -1445,6 +1449,7 @@ module_init(ioat_init_module); static void __exit ioat_exit_module(void) { pci_unregister_driver(&ioat_pci_driver); + kmem_cache_destroy(ioat_sed_cache); kmem_cache_destroy(ioat_cache); } module_exit(ioat_exit_module); diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index b96b448a0e69..dd1a068f905d 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu ret = of_k3_udma_glue_parse(udmax_np, common); if (ret) - goto out_put_spec; + return ret; ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn); - -out_put_spec: - of_node_put(udmax_np); return ret; } diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index e143a7330816..718842fdaf98 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id) u32 st; bool repeat_tx; + spin_lock(&xchan->vchan.lock); + if (xchan->stop_requested) complete(&xchan->last_interrupt); - spin_lock(&xchan->vchan.lock); - /* get submitted request */ vd = vchan_next_desc(&xchan->vchan); if (!vd) diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index d9629ff87861..2328ca58bba6 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state) static int psci_system_suspend(unsigned long unused) { + int err; phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); - return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), + err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), pa_cpu_resume, 0, 0); + return psci_to_linux_errno(err); } static int psci_system_suspend_enter(suspend_state_t state) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 055ba2ea4c12..662d0f28f358 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -41,8 +41,6 @@ #include <linux/dma-buf.h> #include <linux/dma-fence-array.h> #include <linux/pci-p2pdma.h> -#include <linux/pm_runtime.h> -#include "amdgpu_trace.h" /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation @@ -58,42 
+56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - int r; if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(1, __func__); - if (r < 0) - goto out; - return 0; - -out: - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(0, __func__); - return r; -} - -/** - * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation - * - * @dmabuf: DMA-buf where we remove the attachment from - * @attach: the attachment to remove - * - * Called when an attachment is removed from the DMA-buf. - */ -static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach) -{ - struct drm_gem_object *obj = dmabuf->priv; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(0, __func__); } /** @@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, const struct dma_buf_ops amdgpu_dmabuf_ops = { .attach = amdgpu_dma_buf_attach, - .detach = amdgpu_dma_buf_detach, .pin = amdgpu_dma_buf_pin, .unpin = amdgpu_dma_buf_unpin, .map_dma_buf = amdgpu_dma_buf_map, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 10832b470448..bc3ac73b6b8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); pm_runtime_get_noresume(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(1, __func__); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { struct dma_fence *old; @@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(0, __func__); } while (last_seq != seq); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index be4629cdac04..08b9dfb65335 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; unsigned int ndw; - signed long r; + int r; uint32_t seq; - if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready || - !down_read_trylock(&adev->reset_domain->sem)) { + /* + * A GPU reset should flush all TLBs anyway, so no need to do + * this while one is ongoing. 
+ */ + if (!down_read_trylock(&adev->reset_domain->sem)) + return 0; + if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) { if (adev->gmc.flush_tlb_needs_extra_type_2) adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, 2, all_hub, @@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst); - return 0; - } + r = 0; + } else { + /* 2 dwords flush + 8 dwords fence */ + ndw = kiq->pmf->invalidate_tlbs_size + 8; - /* 2 dwords flush + 8 dwords fence */ - ndw = kiq->pmf->invalidate_tlbs_size + 8; + if (adev->gmc.flush_tlb_needs_extra_type_2) + ndw += kiq->pmf->invalidate_tlbs_size; - if (adev->gmc.flush_tlb_needs_extra_type_2) - ndw += kiq->pmf->invalidate_tlbs_size; + if (adev->gmc.flush_tlb_needs_extra_type_0) + ndw += kiq->pmf->invalidate_tlbs_size; - if (adev->gmc.flush_tlb_needs_extra_type_0) - ndw += kiq->pmf->invalidate_tlbs_size; + spin_lock(&adev->gfx.kiq[inst].ring_lock); + amdgpu_ring_alloc(ring, ndw); + if (adev->gmc.flush_tlb_needs_extra_type_2) + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); - spin_lock(&adev->gfx.kiq[inst].ring_lock); - amdgpu_ring_alloc(ring, ndw); - if (adev->gmc.flush_tlb_needs_extra_type_2) - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); + if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); - if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) { + amdgpu_ring_undo(ring); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + goto error_unlock_reset; + } - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); - r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); - if (r) { - amdgpu_ring_undo(ring); + amdgpu_ring_commit(ring); spin_unlock(&adev->gfx.kiq[inst].ring_lock); - goto error_unlock_reset; - } - - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq[inst].ring_lock); - r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); - if (r < 1) { - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - r = -ETIME; - goto error_unlock_reset; + if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) { + dev_err(adev->dev, "timeout waiting for kiq fence\n"); + r = -ETIME; + } } - r = 0; error_unlock_reset: up_read(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 7aafeb763e5d..383fce40d4dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps, __entry->value) ); -TRACE_EVENT(amdgpu_runpm_reference_dumps, - TP_PROTO(uint32_t index, const char *func), - TP_ARGS(index, func), - TP_STRUCT__entry( - __field(uint32_t, index) - __string(func, func) - ), - TP_fast_assign( - __entry->index = index; - __assign_str(func); - ), - TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n", - __entry->index, - __get_str(func)) -); #undef AMDGPU_JOB_GET_TIMELINE_NAME #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 7b16e8cca86a..f5b9f443cfdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -4195,9 +4195,10 @@ static u32 
gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info) { - int i, j, k, counter, xcc_id, active_cu_number = 0; - u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; + int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0; + u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp; unsigned disable_masks[4 * 4]; + bool is_symmetric_cus; if (!adev || !cu_info) return -EINVAL; @@ -4215,6 +4216,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) { + is_symmetric_cus = true; for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; @@ -4242,6 +4244,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } + if (i && is_symmetric_cus && prev_counter != counter) + is_symmetric_cus = false; + prev_counter = counter; + } + if (is_symmetric_cus) { + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG); + tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1); + tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp); } gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 0d1407f25005..32d4519541c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -154,18 +154,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, void *pkt, int size, int api_status_off) { - int ndw = size / 4; - signed long r; - union MESAPI__MISC *x_pkt = pkt; - struct MES_API_STATUS *api_status; + union MESAPI__QUERY_MES_STATUS mes_status_pkt; + signed long timeout = 3000000; /* 3000 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring; - unsigned long flags; - signed long timeout = 3000000; /* 3000 ms */ + struct MES_API_STATUS *api_status; + union MESAPI__MISC *x_pkt = pkt; const char *op_str, *misc_op_str; - u32 fence_offset; - u64 fence_gpu_addr; - u64 *fence_ptr; + unsigned long flags; + u64 status_gpu_addr; + u32 status_offset; + u64 *status_ptr; + signed long r; int ret; if (x_pkt->header.opcode >= MES_SCH_API_MAX) @@ -177,28 +177,38 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, /* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */ timeout = 15 * 600 * 1000; } - BUG_ON(size % 4 != 0); - ret = amdgpu_device_wb_get(adev, &fence_offset); + ret = amdgpu_device_wb_get(adev, &status_offset); if (ret) return ret; - fence_gpu_addr = - adev->wb.gpu_addr + (fence_offset * 4); - fence_ptr = (u64 *)&adev->wb.wb[fence_offset]; - *fence_ptr = 0; + + status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4); + status_ptr = (u64 *)&adev->wb.wb[status_offset]; + *status_ptr = 0; spin_lock_irqsave(&mes->ring_lock, flags); - if (amdgpu_ring_alloc(ring, ndw)) { - spin_unlock_irqrestore(&mes->ring_lock, flags); - amdgpu_device_wb_free(adev, fence_offset); - return -ENOMEM; - } + r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4); + if (r) + goto error_unlock_free; api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off); - 
api_status->api_completion_fence_addr = fence_gpu_addr; + api_status->api_completion_fence_addr = status_gpu_addr; api_status->api_completion_fence_value = 1; - amdgpu_ring_write_multiple(ring, pkt, ndw); + amdgpu_ring_write_multiple(ring, pkt, size / 4); + + memset(&mes_status_pkt, 0, sizeof(mes_status_pkt)); + mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS; + mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + mes_status_pkt.api_status.api_completion_fence_addr = + ring->fence_drv.gpu_addr; + mes_status_pkt.api_status.api_completion_fence_value = + ++ring->fence_drv.sync_seq; + + amdgpu_ring_write_multiple(ring, &mes_status_pkt, + sizeof(mes_status_pkt) / 4); + amdgpu_ring_commit(ring); spin_unlock_irqrestore(&mes->ring_lock, flags); @@ -206,15 +216,16 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, misc_op_str = mes_v11_0_get_misc_op_string(x_pkt); if (misc_op_str) - dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str); + dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, + misc_op_str); else if (op_str) dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str); else - dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode); + dev_dbg(adev->dev, "MES msg=%d was emitted\n", + x_pkt->header.opcode); - r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout); - amdgpu_device_wb_free(adev, fence_offset); - if (r < 1) { + r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout); + if (r < 1 || !*status_ptr) { if (misc_op_str) dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n", @@ -229,10 +240,19 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, while (halt_if_hws_hang) schedule(); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto error_wb_free; } + amdgpu_device_wb_free(adev, status_offset); return 0; + +error_unlock_free: + spin_unlock_irqrestore(&mes->ring_lock, flags); + +error_wb_free: + amdgpu_device_wb_free(adev, status_offset); + return r; } static int convert_to_mes_queue_type(int queue_type) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index f08a32c18694..40b28298af30 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -32,7 +32,9 @@ #include "mp/mp_14_0_2_sh_mask.h" MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 @@ -66,6 +68,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp) err = psp_init_sos_microcode(psp, ucode_prefix); if (err) return err; + err = psp_init_ta_microcode(psp, ucode_prefix); + if (err) + return err; break; default: BUG(); diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 5fcd4f778dc3..47b8b49da8a7 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -8,7 +8,7 @@ config DRM_AMD_DC depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 select SND_HDA_COMPONENT if SND_HDA_CORE # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 - select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG) + select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV)) help Choose 
this option if you want to use the new display engine support for AMDGPU. This adds required support for Vega and diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f1d67c6f4b98..e9ac20bed0f2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9169,9 +9169,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); - if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) - dc_allow_idle_optimizations(dm->dc, false); - drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -11440,6 +11437,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) mutex_unlock(&adev->dm.dc_lock); } +static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) + dc_exit_ips_for_hw_access(dc); +} + void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, u32 value, const char *func_name) { @@ -11450,6 +11453,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, return; } #endif + + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); cgs_write_register(ctx->cgs_device, address, value); trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); } @@ -11473,6 +11478,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return 0; } + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 60f251cf973b..beed7adbbd43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, .urgent_latency_vm_data_only_us = 4.0, - .dram_clock_change_latency_us = 11.72, + .dram_clock_change_latency_us = 34.0, .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index e4f333d4fb54..a201dbb743d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, .urgent_latency_vm_data_only_us = 4.0, - .dram_clock_change_latency_us = 11.72, + .dram_clock_change_latency_us = 34, .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 5295f52e4fc8..dcced89c07b3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1439,3 +1439,75 @@ void 
dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, } } } + +static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx) +{ + /* Calculate average pixel count per TU, return false if under ~2.00 to + * avoid empty TUs. This is only required for DPIA tunneling as empty TUs + * are legal to generate for native DP links. Assume TU size 64 as there + * is currently no scenario where it's reprogrammed from HW default. + * MTPs have no such limitation, so this does not affect MST use cases. + */ + unsigned int pix_clk_mhz; + unsigned int symclk_mhz; + unsigned int avg_pix_per_tu_x1000; + unsigned int tu_size_bytes = 64; + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; + const struct dc *dc = pipe_ctx->stream->link->dc; + + if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + return false; + + // Not necessary for MST configurations + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + return false; + + pix_clk_mhz = timing->pix_clk_100hz / 10000; + + // If this is true, can't block due to dynamic ODM + if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz) + return false; + + switch (link_settings->link_rate) { + case LINK_RATE_LOW: + symclk_mhz = 162; + break; + case LINK_RATE_HIGH: + symclk_mhz = 270; + break; + case LINK_RATE_HIGH2: + symclk_mhz = 540; + break; + case LINK_RATE_HIGH3: + symclk_mhz = 810; + break; + default: + // We shouldn't be tunneling any other rates, something is wrong + ASSERT(0); + return false; + } + + avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes) + / (symclk_mhz * link_settings->lane_count); + + // Add small empirically-decided margin to account for potential jitter + return (avg_pix_per_tu_x1000 < 2020); +} + +bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + + if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) + return false; + + if (should_avoid_empty_tu(pipe_ctx)) + return false; + + if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && + dc->debug.enable_dp_dig_pixel_rate_div_policy) + return true; + + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index a731c8880d60..f0ea7d1511ae 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -95,4 +95,6 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); +bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index df3bf77f3fb4..199781233fd5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, - .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy, 
.dsc_pg_control = dcn35_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .enable_plane = dcn35_enable_plane, diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 6bb42d04b247..e8b6989a40f3 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index e05e25cd4a94..5b3b6ae1e3d7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -442,6 +442,10 @@ bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp) struct intel_encoder *encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + /* eDP MSO is not compatible with joiner */ + if (intel_dp->mso_link_count) + return false; + return DISPLAY_VER(dev_priv) >= 12 || (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A); diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 21d27e6235f3..b11f7c5bbcbe 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index faddae3d6ac2..6f1ac940cbae 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -2,7 +2,7 @@ config DRM_VMWGFX tristate "DRM driver for VMware Virtual GPU" depends on DRM && PCI && MMU - depends on X86 || ARM64 + depends on (X86 && HYPERVISOR_GUEST) || ARM64 select DRM_TTM select DRM_TTM_HELPER select MAPPING_DIRTY_HELPERS diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 240e7a4bbff1..5faca4fc2fef 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -631,8 +631,6 @@ int xe_guc_enable_communication(struct xe_guc *guc) struct xe_device *xe = guc_to_xe(guc); int err; - guc_enable_irq(guc); - if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) { struct xe_gt *gt = guc_to_gt(guc); struct xe_tile *tile = gt_to_tile(gt); @@ -640,6 +638,8 @@ int xe_guc_enable_communication(struct xe_guc *guc) err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc); if (err) return err; + } else { + guc_enable_irq(guc); } xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK, diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 56a4dabf5a38..4ad670a80a63 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -431,8 +431,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c) oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8); /* Init the device */ - oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN); + oc_setreg(i2c, 
OCI2C_CMD, OCI2C_CMD_IACK); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 9dca451ed522..6974922e5609 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -107,8 +107,6 @@ struct bnxt_re_gsi_context { struct bnxt_re_sqp_entries *sqp_tbl; }; -#define BNXT_RE_MIN_MSIX 2 -#define BNXT_RE_MAX_MSIX 9 #define BNXT_RE_AEQ_IDX 0 #define BNXT_RE_NQ_IDX 1 #define BNXT_RE_GEN_P5_MAX_VF 64 @@ -168,7 +166,7 @@ struct bnxt_re_dev { struct bnxt_qplib_rcfw rcfw; /* NQ */ - struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; + struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX]; /* Device Resources */ struct bnxt_qplib_dev_attr dev_attr; diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c index 4f13423ecdbd..887b09dd86e7 100644 --- a/drivers/infiniband/hw/mana/mr.c +++ b/drivers/infiniband/hw/mana/mr.c @@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", start, iova, length, access_flags); + access_flags &= ~IB_ACCESS_OPTIONAL; if (access_flags & ~VALID_MR_FLAGS) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 2366c46eebc8..43660c831b22 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3759,10 +3759,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; return 0; -err: - mlx5r_macsec_dealloc_gids(dev); err_mp: mlx5_ib_cleanup_multiport_master(dev); +err: + mlx5r_macsec_dealloc_gids(dev); return err; } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index ecc111ed5d86..d3c1f63791a2 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); MLX5_SET(mkc, mkc, access_mode_4_2, (ent->rb_key.access_mode >> 2) & 0x7); + MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); MLX5_SET(mkc, mkc, translations_octword_size, get_mkc_octo_size(ent->rb_key.access_mode, @@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache, new = &((*new)->rb_left); if (cmp < 0) new = &((*new)->rb_right); - if (cmp == 0) { - mutex_unlock(&cache->rb_lock); + if (cmp == 0) return -EEXIST; - } } /* Add new node and rebalance tree. 
*/ @@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, } mr->mmkey.cache_ent = ent; mr->mmkey.type = MLX5_MKEY_MR; + mr->mmkey.rb_key = ent->rb_key; + mr->mmkey.cacheable = true; init_waitqueue_head(&mr->mmkey.wait); return mr; } @@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, mr->ibmr.pd = pd; mr->umem = umem; mr->page_shift = order_base_2(page_size); - mr->mmkey.cacheable = true; set_mr_fields(dev, mr, umem->length, access_flags, iova); return mr; diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index a056ea835da5..84be0c3d5699 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq, int err; struct mlx5_srq_attr in = {}; __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); + __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) / + sizeof(struct mlx5_wqe_data_seg); if (init_attr->srq_type != IB_SRQT_BASIC && init_attr->srq_type != IB_SRQT_XRC && init_attr->srq_type != IB_SRQT_TM) return -EOPNOTSUPP; - /* Sanity check SRQ size before proceeding */ - if (init_attr->attr.max_wr >= max_srq_wqes) { - mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", - init_attr->attr.max_wr, - max_srq_wqes); + /* Sanity check SRQ and sge size before proceeding */ + if (init_attr->attr.max_wr >= max_srq_wqes || + init_attr->attr.max_sge > max_sge_sz) { + mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n", + init_attr->attr.max_wr, max_srq_wqes, + init_attr->attr.max_sge, max_sge_sz); return -EINVAL; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index c6a7fa3054fa..6596a85723c9 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, * receive buffer later. For rmda operations additional * length checks are performed in check_rkey. 
*/ + if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { + unsigned int payload = payload_size(pkt); + unsigned int recv_buffer_len = 0; + int i; + + for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) + recv_buffer_len += qp->resp.wqe->dma.sge[i].length; + if (payload + 40 > recv_buffer_len) { + rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); + return RESPST_ERR_LENGTH; + } + } + if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))) { unsigned int mtu = qp->mtu; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index c7d4d8ab5a09..de6238ee4379 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe, int i; for (i = 0; i < ibwr->num_sge; i++, sge++) { - memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length); + memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length); p += sge->length; } } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 70f0654c58b6..2b8370ecf42a 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -209,6 +209,7 @@ static const struct xpad_device { { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE }, + { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE }, { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 4e38229404b4..b4723ea395eb 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1477,15 +1477,46 @@ static void elantech_disconnect(struct psmouse *psmouse) } /* + * Some hw_version 4 models fail to properly activate absolute mode on + * resume without going through disable/enable cycle. 
+ */ +static const struct dmi_system_id elantech_needs_reenable[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + /* Lenovo N24 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81AF"), + }, + }, +#endif + { } +}; + +/* * Put the touchpad back into absolute mode when reconnecting */ static int elantech_reconnect(struct psmouse *psmouse) { + int err; + psmouse_reset(psmouse); if (elantech_detect(psmouse, 0)) return -1; + if (dmi_check_system(elantech_needs_reenable)) { + err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE); + if (err) + psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n", + psmouse->ps2dev.serio->phys, err); + + err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE); + if (err) + psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n", + psmouse->ps2dev.serio->phys, err); + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad back into absolute mode.\n"); diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index dfc6c581873b..5b50475ec414 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -76,7 +76,7 @@ static inline void i8042_write_command(int val) #define SERIO_QUIRK_PROBE_DEFER BIT(5) #define SERIO_QUIRK_RESET_ALWAYS BIT(6) #define SERIO_QUIRK_RESET_NEVER BIT(7) -#define SERIO_QUIRK_DIECT BIT(8) +#define SERIO_QUIRK_DIRECT BIT(8) #define SERIO_QUIRK_DUMBKBD BIT(9) #define SERIO_QUIRK_NOLOOP BIT(10) #define SERIO_QUIRK_NOTIMEOUT BIT(11) @@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + { + /* + * The Ayaneo Kun is a handheld device where some the buttons + * are handled by an AT keyboard. The keyboard is usually + * detected as raw, but sometimes, usually after a cold boot, + * it is detected as translated. Make sure that the keyboard + * is always in raw mode. 
+ */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "KUN"), + }, + .driver_data = (void *)(SERIO_QUIRK_DIRECT) + }, { } }; @@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void) if (quirks & SERIO_QUIRK_RESET_NEVER) i8042_reset = I8042_RESET_NEVER; } - if (quirks & SERIO_QUIRK_DIECT) + if (quirks & SERIO_QUIRK_DIRECT) i8042_direct = true; if (quirks & SERIO_QUIRK_DUMBKBD) i8042_dumbkbd = true; diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index d2bbb436a77d..4d13db13b9e5 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1111,6 +1111,16 @@ static const struct of_device_id ads7846_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ads7846_dt_ids); +static const struct spi_device_id ads7846_spi_ids[] = { + { "tsc2046", 7846 }, + { "ads7843", 7843 }, + { "ads7845", 7845 }, + { "ads7846", 7846 }, + { "ads7873", 7873 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ads7846_spi_ids); + static const struct ads7846_platform_data *ads7846_get_props(struct device *dev) { struct ads7846_platform_data *pdata; @@ -1386,10 +1396,10 @@ static struct spi_driver ads7846_driver = { }, .probe = ads7846_probe, .remove = ads7846_remove, + .id_table = ads7846_spi_ids, }; module_spi_driver(ads7846_driver); MODULE_DESCRIPTION("ADS7846 TouchScreen Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("spi:ads7846"); diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 31ffdc2a93f3..79bdb2b10949 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data) if (!error && data[0] == 2) { error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1, ILI251X_DATA_SIZE2); - if (error >= 0 && error != ILI251X_DATA_SIZE2) - error = -EIO; + if (error >= 0) + error = error == ILI251X_DATA_SIZE2 ? 
0 : -EIO; } return error; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3c3fcce4acd4..d19aabf5d4fb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5773,6 +5773,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev, if (real_dev) { ret = ethtool_get_ts_info_by_layer(real_dev, info); } else { + info->phc_index = -1; + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; /* Check if all slaves support software tx timestamping */ rcu_read_lock(); bond_for_each_slave_rcu(bond, slave, iter) { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 4b7e08e8ed56..c116d3255207 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev) clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags); can_rx_offload_enable(&priv->offload); + priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, + dev_name(&spi->dev)); + if (!priv->wq) { + err = -ENOMEM; + goto out_can_rx_offload_disable; + } + INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync); + err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, IRQF_SHARED | IRQF_ONESHOT, dev_name(&spi->dev), priv); if (err) - goto out_can_rx_offload_disable; + goto out_destroy_workqueue; err = mcp251xfd_chip_interrupts_enable(priv); if (err) @@ -1634,6 +1643,8 @@ static int mcp251xfd_open(struct net_device *ndev) out_free_irq: free_irq(spi->irq, priv); + out_destroy_workqueue: + destroy_workqueue(priv->wq); out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); @@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev) hrtimer_cancel(&priv->tx_irq_timer); mcp251xfd_chip_interrupts_disable(priv); free_irq(ndev->irq, priv); + destroy_workqueue(priv->wq); can_rx_offload_disable(&priv->offload); mcp251xfd_timestamp_stop(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c index 160528d3cc26..b1de8052a45c 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c @@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, tx_obj->xfer[0].len = len; } +static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_ring *tx_ring, + int err) +{ + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + unsigned int frame_len = 0; + u8 tx_head; + + tx_ring->head--; + stats->tx_dropped++; + tx_head = mcp251xfd_get_tx_head(tx_ring); + can_free_echo_skb(ndev, tx_head, &frame_len); + netdev_completed_queue(ndev, 1, frame_len); + netif_wake_queue(ndev); + + if (net_ratelimit()) + netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); +} + +void mcp251xfd_tx_obj_write_sync(struct work_struct *work) +{ + struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv, + tx_work); + struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj; + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + int err; + + err = spi_sync(priv->spi, &tx_obj->msg); + if (err) + mcp251xfd_tx_failure_drop(priv, tx_ring, err); +} + static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv, struct mcp251xfd_tx_obj *tx_obj) { @@ -162,6 +195,11 @@ static 
bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv, return false; } +static bool mcp251xfd_work_busy(struct work_struct *work) +{ + return work_busy(work); +} + netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; - if (mcp251xfd_tx_busy(priv, tx_ring)) + if (mcp251xfd_tx_busy(priv, tx_ring) || + mcp251xfd_work_busy(&priv->tx_work)) return NETDEV_TX_BUSY; tx_obj = mcp251xfd_get_tx_obj_next(tx_ring); @@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, netdev_sent_queue(priv->ndev, frame_len); err = mcp251xfd_tx_obj_write(priv, tx_obj); - if (err) - goto out_err; - - return NETDEV_TX_OK; - - out_err: - netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); + if (err == -EBUSY) { + netif_stop_queue(ndev); + priv->tx_work_obj = tx_obj; + queue_work(priv->wq, &priv->tx_work); + } else if (err) { + mcp251xfd_tx_failure_drop(priv, tx_ring, err); + } return NETDEV_TX_OK; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 24510b3b8020..b35bfebd23f2 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -633,6 +633,10 @@ struct mcp251xfd_priv { struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM]; struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM]; + struct workqueue_struct *wq; + struct work_struct tx_work; + struct mcp251xfd_tx_obj *tx_work_obj; + DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__); u8 rx_ring_num; @@ -952,6 +956,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); +void mcp251xfd_tx_obj_write_sync(struct work_struct *work); netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, struct net_device *ndev); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 6c70eb1ceb8b..7faec0eef1ca 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -303,7 +303,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, } usb_free_urb(urb); - return 0; + return err; } int kvaser_usb_can_rx_over_error(struct net_device *netdev) diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index f8ad7833f5d9..425e20daf1e9 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev) SPI_AUTO_EDGE_DETECTION, 0); /* default configuration */ - ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); - data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING | - SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE; - ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + ksz_write8(dev, REG_SW_LUE_CTRL_1, + SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER); /* disable interrupts */ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK); @@ -429,6 +427,57 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze) mutex_unlock(&p->mib.cnt_mutex); } +int ksz9477_errata_monitor(struct ksz_device *dev, int port, + u64 tx_late_col) +{ + u32 pmavbc; + u8 status; + u16 pqm; + int ret; + + ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status); + if (ret) + return ret; + if (!(FIELD_GET(PORT_INTF_SPEED_MASK, 
status) == PORT_INTF_SPEED_NONE) && + !(status & PORT_INTF_FULL_DUPLEX)) { + /* Errata DS80000754 recommends monitoring potential faults in + * half-duplex mode. The switch might not be able to communicate anymore + * in these states. + * If you see this message, please read the errata-sheet for more information: + * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf + * To workaround this issue, half-duplex mode should be avoided. + * A software reset could be implemented to recover from this state. + */ + dev_warn_once(dev->dev, + "Half-duplex detected on port %d, transmission halt may occur\n", + port); + if (tx_late_col != 0) { + /* Transmission halt with late collisions */ + dev_crit_once(dev->dev, + "TX late collisions detected, transmission may be halted on port %d\n", + port); + } + ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status); + if (ret) + return ret; + if (status & SW_VLAN_ENABLE) { + ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm); + if (ret) + return ret; + ret = ksz_read32(dev, REG_PMAVBC, &pmavbc); + if (ret) + return ret; + if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) || + (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) { + /* Transmission halt with Half-Duplex and VLAN */ + dev_crit_once(dev->dev, + "resources out of limits, transmission may be halted\n"); + } + } + } + return ret; +} + void ksz9477_port_init_cnt(struct ksz_device *dev, int port) { struct ksz_port_mib *mib = &dev->ports[port].mib; @@ -1299,6 +1348,10 @@ int ksz9477_setup(struct dsa_switch *ds) /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true); + /* Use collision based back pressure mode. */ + ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE, + SW_BACK_PRESSURE_COLLISION); + /* Now we can configure default MTU value */ ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK, VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h index ce1e656b800b..239a281da10b 100644 --- a/drivers/net/dsa/microchip/ksz9477.h +++ b/drivers/net/dsa/microchip/ksz9477.h @@ -36,6 +36,8 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port, bool ingress, struct netlink_ext_ack *extack); void ksz9477_port_mirror_del(struct ksz_device *dev, int port, struct dsa_mall_mirror_tc_entry *mirror); +int ksz9477_errata_monitor(struct ksz_device *dev, int port, + u64 tx_late_col); void ksz9477_get_caps(struct ksz_device *dev, int port, struct phylink_config *config); int ksz9477_fdb_dump(struct ksz_device *dev, int port, diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index f3a205ee483f..d5354c600ea1 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -247,6 +247,7 @@ #define REG_SW_MAC_CTRL_1 0x0331 #define SW_BACK_PRESSURE BIT(5) +#define SW_BACK_PRESSURE_COLLISION 0 #define FAIR_FLOW_CTRL BIT(4) #define NO_EXC_COLLISION_DROP BIT(3) #define SW_JUMBO_PACKET BIT(2) @@ -842,8 +843,8 @@ #define REG_PORT_STATUS_0 0x0030 -#define PORT_INTF_SPEED_M 0x3 -#define PORT_INTF_SPEED_S 3 +#define PORT_INTF_SPEED_MASK GENMASK(4, 3) +#define PORT_INTF_SPEED_NONE GENMASK(1, 0) #define PORT_INTF_FULL_DUPLEX BIT(2) #define PORT_TX_FLOW_CTRL BIT(1) #define PORT_RX_FLOW_CTRL BIT(0) @@ -1167,6 +1168,11 @@ #define PORT_RMII_CLK_SEL BIT(7) #define PORT_MII_SEL_EDGE BIT(5) +#define REG_PMAVBC 0x03AC + +#define 
PMAVBC_MASK GENMASK(26, 16) +#define PMAVBC_MIN 0x580 + /* 4 - MAC */ #define REG_PORT_MAC_CTRL_0 0x0400 @@ -1494,6 +1500,7 @@ #define PORT_QM_TX_CNT_USED_S 0 #define PORT_QM_TX_CNT_M (BIT(11) - 1) +#define PORT_QM_TX_CNT_MAX 0x200 #define REG_PORT_QM_TX_CNT_1__4 0x0A14 diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index debb554b60ed..baa1eeb9a1b0 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1382,6 +1382,7 @@ const struct ksz_chip_data ksz_switch_chips[] = { .tc_cbs_supported = true, .ops = &ksz9477_dev_ops, .phylink_mac_ops = &ksz9477_phylink_mac_ops, + .phy_errata_9477 = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1416,6 +1417,7 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_ipms = 8, .ops = &ksz9477_dev_ops, .phylink_mac_ops = &ksz9477_phylink_mac_ops, + .phy_errata_9477 = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1450,6 +1452,7 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_ipms = 8, .ops = &ksz9477_dev_ops, .phylink_mac_ops = &ksz9477_phylink_mac_ops, + .phy_errata_9477 = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1540,6 +1543,7 @@ const struct ksz_chip_data ksz_switch_chips[] = { .tc_cbs_supported = true, .ops = &ksz9477_dev_ops, .phylink_mac_ops = &ksz9477_phylink_mac_ops, + .phy_errata_9477 = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1820,6 +1824,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port) struct rtnl_link_stats64 *stats; struct ksz_stats_raw *raw; struct ksz_port_mib *mib; + int ret; mib = &dev->ports[port].mib; stats = &mib->stats64; @@ -1861,6 +1866,12 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port) pstats->rx_pause_frames = raw->rx_pause; spin_unlock(&mib->stats64_lock); + + if (dev->info->phy_errata_9477) { + ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col); + if (ret) + dev_err(dev->dev, "Failed to monitor transmission halt\n"); + } } void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port) @@ -2185,7 +2196,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d) struct ksz_device *dev = kirq->dev; int ret; - ret = ksz_write32(dev, kirq->reg_mask, kirq->masked); + ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); if (ret) dev_err(dev->dev, "failed to change IRQ mask\n"); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index c784fd23a993..ee7db46e469d 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -66,6 +66,7 @@ struct ksz_chip_data { bool tc_cbs_supported; const struct ksz_dev_ops *ops; const struct phylink_mac_ops *phylink_mac_ops; + bool phy_errata_9477; bool ksz87xx_eee_link_erratum; const struct ksz_mib_names *mib_names; int mib_cnt; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5e9a93bdb518..23ebeb143987 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2482,6 +2482,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) (tx_pool->consumer_index + 1) % tx_pool->num_buffers; tx_buff = &tx_pool->tx_buff[bufidx]; + + /* Sanity checks on our free map to make sure it points to an index + * that is not 
being occupied by another skb. If skb memory is + * not freed then we see congestion control kick in and halt tx. + */ + if (unlikely(tx_buff->skb)) { + dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n", + skb_is_gso(skb) ? "tso_pool" : "tx_pool", + queue_num, bufidx); + dev_kfree_skb_any(tx_buff->skb); + } + tx_buff->skb = skb; tx_buff->index = bufidx; tx_buff->pool_index = queue_num; @@ -4061,6 +4073,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) adapter->num_active_tx_scrqs = 0; } + /* Clean any remaining outstanding SKBs + * we freed the irq so we won't be hearing + * from them + */ + clean_tx_pools(adapter); + if (adapter->rx_scrq) { for (i = 0; i < adapter->num_active_rx_scrqs; i++) { if (!adapter->rx_scrq[i]) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 643384c9fbf4..080efb7473aa 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4139,7 +4139,7 @@ bool ice_is_wol_supported(struct ice_hw *hw) int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) { struct ice_pf *pf = vsi->back; - int err = 0, timeout = 50; + int i, err = 0, timeout = 50; if (!new_rx && !new_tx) return -EINVAL; @@ -4165,6 +4165,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) ice_vsi_close(vsi); ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + + ice_for_each_traffic_class(i) { + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(vsi->netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + } ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); done: diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 671368d2c77e..9adf4301c9b1 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6907,6 +6907,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, /* 9704 == 9728 - 20 and rounding to 8 */ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; device_set_node(&dev->dev, port_fwnode); + dev->dev_port = port->id; port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; port->pcs_gmac.neg_mode = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index a85ac039d779..87d5776e3b88 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for } else if (lvl == NIX_TXSCH_LVL_TL4) { parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); - req->regval[0] = parent << 16; + req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); - req->regval[0] = parent << 16; + req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->regval[1] = dwrr_val; @@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for } else if (lvl == NIX_TXSCH_LVL_TL2) { parent = schq_list[NIX_TXSCH_LVL_TL1][prio]; req->reg[0] = NIX_AF_TL2X_PARENT(schq); - req->regval[0] = parent << 16; + 
req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); - req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; + req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val; if (lvl == hw->txschq_link_cfg_lvl) { req->num_regs++; @@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->num_regs++; req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); - req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); + req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1; req->num_regs++; req->reg[2] = NIX_AF_TL1X_CIR(schq); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index 45a32e4b49d1..e3aee6e36215 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -139,33 +139,34 @@ #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) /* NIX AF transmit scheduler registers */ -#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) -#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) -#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) -#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) -#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) -#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) -#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16) -#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16) -#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16) -#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) -#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) -#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16) -#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16) -#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16) -#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16) -#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) -#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) -#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16) -#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16) -#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) -#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16) -#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) -#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16) -#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16) -#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16) -#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) -#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) +#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16) +#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16) +#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16) +#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16) +#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16) +#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16) +#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16) +#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16) +#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16) +#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16) +#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16) +#define 
NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16) +#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16) +#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16) +#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16) +#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3) /* LMT LF registers */ #define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 929b4eac25d9..3eb85949677a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -513,7 +513,7 @@ process_cqe: static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) { - struct dim_sample dim_sample; + struct dim_sample dim_sample = { 0 }; u64 rx_frames, rx_bytes; u64 tx_frames, tx_bytes; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index edac008099c0..0f844c14485a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, num_regs++; otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); - } else if (level == NIX_TXSCH_LVL_TL4) { otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL3) { @@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, /* check if node is root */ if (node->qid == OTX2_QOS_QID_INNER && !node->parent) { cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq); - cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 | + cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); num_regs++; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index e794162a91d0..0320dabd1380 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1779,18 +1779,25 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci, return -EBUSY; } -static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci) +static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, + bool pci_reset_sbr_supported) { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; int err; + if (!pci_reset_sbr_supported) { + pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n"); + goto sbr; + } + mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE); err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); if (err) return err; +sbr: device_lock_assert(&pdev->dev); pci_cfg_access_lock(pdev); @@ -1818,6 +1825,7 @@ static int mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) { struct pci_dev *pdev = mlxsw_pci->pdev; + bool pci_reset_sbr_supported = false; char mcam_pl[MLXSW_REG_MCAM_LEN]; bool pci_reset_supported = false; u32 sys_status; @@ -1837,13 +1845,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) mlxsw_reg_mcam_pack(mcam_pl, MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES); err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl); - if (!err) + if (!err) { mlxsw_reg_mcam_unpack(mcam_pl, 
MLXSW_REG_MCAM_PCI_RESET, &pci_reset_supported); + mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR, + &pci_reset_sbr_supported); + } if (pci_reset_supported) { pci_dbg(pdev, "Starting PCI reset flow\n"); - err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci); + err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci, + pci_reset_sbr_supported); } else { pci_dbg(pdev, "Starting software reset flow\n"); err = mlxsw_pci_reset_sw(mlxsw_pci); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8adf86a6f5cc..3bb89045eaf5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -10671,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits { MLXSW_REG_MCAM_MCIA_128B = 34, /* If set, MRSR.command=6 is supported. */ MLXSW_REG_MCAM_PCI_RESET = 48, + /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */ + MLXSW_REG_MCAM_PCI_RESET_SBR = 67, }; #define MLXSW_REG_BYTES_PER_DWORD 0x4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 1b9ed393fbd4..2c0cfa79d138 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -1611,8 +1611,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { + u16 local_port, local_port_1, first_local_port, last_local_port; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u16 local_port, local_port_1, last_local_port; struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; u8 masked_count, current_page = 0; unsigned long cb_priv = 0; @@ -1632,6 +1632,7 @@ next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE; last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; @@ -1649,9 +1650,12 @@ next_batch: if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, - local_port, 1); + local_port - first_local_port, + 1); } - mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, + local_port - first_local_port, + 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, &bulk_list); @@ -1688,7 +1692,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u16 local_port, last_local_port; + u16 local_port, first_local_port, last_local_port; LIST_HEAD(bulk_list); unsigned int masked_count; u8 current_page = 0; @@ -1706,6 +1710,7 @@ next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE; last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; @@ -1723,9 +1728,12 @@ next_batch: if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, - local_port, 1); + local_port - first_local_port, + 1); } - mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + 
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, + local_port - first_local_port, + 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, &bulk_list); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 3f094a8a4e97..91f10910ea44 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -2838,6 +2838,8 @@ static int add_adev(struct gdma_dev *gd) if (ret) goto init_fail; + /* madev is owned by the auxiliary device */ + madev = NULL; ret = auxiliary_device_add(adev); if (ret) goto add_fail; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 92f16b6c5662..c647033f3ad2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg); unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, ionic_cq_cb cb, ionic_cq_done_cb done_cb, void *done_arg); -unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do); +unsigned int ionic_tx_cq_service(struct ionic_cq *cq, + unsigned int work_to_do, + bool in_napi); int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, struct ionic_queue *q, unsigned int index, const char *name, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 38ce35462737..aa0cc31dfe6e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1203,7 +1203,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) ionic_rx_service, NULL, NULL); if (lif->hwstamp_txq) - tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget); + tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget); work_done = max(max(n_work, a_work), max(rx_work, tx_work)); if (work_done < budget && napi_complete_done(napi, work_done)) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 5bf13a5d411c..fc79baad4561 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q, struct ionic_tx_desc_info *desc_info, - struct ionic_txq_comp *comp); + struct ionic_txq_comp *comp, + bool in_napi); static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell) { @@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n, return nxmit; } +static void ionic_xdp_rx_put_bufs(struct ionic_queue *q, + struct ionic_buf_info *buf_info, + int nbufs) +{ + int i; + + for (i = 0; i < nbufs; i++) { + dma_unmap_page(q->dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + buf_info->page = NULL; + buf_info++; + } +} + static bool ionic_run_xdp(struct ionic_rx_stats *stats, struct net_device *netdev, struct bpf_prog *xdp_prog, @@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, struct netdev_queue *nq; struct xdp_frame *xdpf; int remain_len; + int nbufs = 1; int frag_len; int err = 0; @@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, if (page_is_pfmemalloc(bi->page)) xdp_buff_set_frag_pfmemalloc(&xdp_buf); } while (remain_len > 0); + nbufs += 
sinfo->nr_frags; } xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf); @@ -574,9 +591,6 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, goto out_xdp_abort; } - dma_unmap_page(rxq->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - err = ionic_xdp_post_frame(txq, xdpf, XDP_TX, buf_info->page, buf_info->page_offset, @@ -586,23 +600,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } - buf_info->page = NULL; + ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); stats->xdp_tx++; /* the Tx completion will free the buffers */ break; case XDP_REDIRECT: - /* unmap the pages before handing them to a different device */ - dma_unmap_page(rxq->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); if (unlikely(err)) { netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); goto out_xdp_abort; } - buf_info->page = NULL; + ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); rxq->xdp_flush = true; stats->xdp_redirect++; break; @@ -932,7 +942,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) u32 work_done = 0; u32 flags = 0; - work_done = ionic_tx_cq_service(cq, budget); + work_done = ionic_tx_cq_service(cq, budget, !!budget); if (unlikely(!budget)) return budget; @@ -1015,7 +1025,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) txqcq = lif->txqcqs[qi]; txcq = &lif->txqcqs[qi]->cq; - tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT); + tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget); if (unlikely(!budget)) return budget; @@ -1148,7 +1158,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q, struct ionic_tx_desc_info *desc_info, - struct ionic_txq_comp *comp) + struct ionic_txq_comp *comp, + bool in_napi) { struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_qcq *qcq = q_to_qcq(q); @@ -1200,11 +1211,13 @@ static void ionic_tx_clean(struct ionic_queue *q, desc_info->bytes = skb->len; stats->clean++; - napi_consume_skb(skb, 1); + napi_consume_skb(skb, likely(in_napi) ? 
1 : 0); } static bool ionic_tx_service(struct ionic_cq *cq, - unsigned int *total_pkts, unsigned int *total_bytes) + unsigned int *total_pkts, + unsigned int *total_bytes, + bool in_napi) { struct ionic_tx_desc_info *desc_info; struct ionic_queue *q = cq->bound_q; @@ -1226,7 +1239,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, desc_info->bytes = 0; index = q->tail_idx; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); - ionic_tx_clean(q, desc_info, comp); + ionic_tx_clean(q, desc_info, comp, in_napi); if (desc_info->skb) { pkts++; bytes += desc_info->bytes; @@ -1240,7 +1253,9 @@ static bool ionic_tx_service(struct ionic_cq *cq, return true; } -unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) +unsigned int ionic_tx_cq_service(struct ionic_cq *cq, + unsigned int work_to_do, + bool in_napi) { unsigned int work_done = 0; unsigned int bytes = 0; @@ -1249,7 +1264,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) if (work_to_do == 0) return 0; - while (ionic_tx_service(cq, &pkts, &bytes)) { + while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) { if (cq->tail_idx == cq->num_descs - 1) cq->done_color = !cq->done_color; cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); @@ -1275,7 +1290,7 @@ void ionic_tx_flush(struct ionic_cq *cq) { u32 work_done; - work_done = ionic_tx_cq_service(cq, cq->num_descs); + work_done = ionic_tx_cq_service(cq, cq->num_descs, false); if (work_done) ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index, work_done, IONIC_INTR_CRED_RESET_COALESCE); @@ -1292,7 +1307,7 @@ void ionic_tx_empty(struct ionic_queue *q) desc_info = &q->tx_info[q->tail_idx]; desc_info->bytes = 0; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); - ionic_tx_clean(q, desc_info, NULL); + ionic_tx_clean(q, desc_info, NULL, false); if (desc_info->skb) { pkts++; bytes += desc_info->bytes; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 5aada7cf3da7..ebafedde0ab7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -5607,6 +5607,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8814, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8804, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8841, MICREL_PHY_ID_MASK }, diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig index 577ea904b3d9..7fab916a7f46 100644 --- a/drivers/net/pse-pd/Kconfig +++ b/drivers/net/pse-pd/Kconfig @@ -23,6 +23,7 @@ config PSE_REGULATOR config PSE_PD692X0 tristate "PD692X0 PSE controller" depends on I2C + select FW_LOADER select FW_UPLOAD help This module provides support for PD692x0 regulator based Ethernet diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index c2fb736f78b2..b034ef8a73ea 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -326,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb) if (netif_carrier_ok(dev->net) != link) { usbnet_link_change(dev, link, 1); - netdev_info(dev->net, "ax88179 - Link status is: %d\n", link); + if (!link) + netdev_info(dev->net, "ax88179 - Link status is: 0\n"); } } @@ -1542,6 +1543,7 @@ static int ax88179_link_reset(struct usbnet *dev) GMII_PHY_PHYSR, 2, &tmp16); if (!(tmp16 & GMII_PHY_PHYSR_LINK)) { + netdev_info(dev->net, "ax88179 - Link status is: 0\n"); return 0; } else if (GMII_PHY_PHYSR_GIGA == 
(tmp16 & GMII_PHY_PHYSR_SMASK)) { mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ; @@ -1579,6 +1581,8 @@ static int ax88179_link_reset(struct usbnet *dev) netif_carrier_on(dev->net); + netdev_info(dev->net, "ax88179 - Link status is: 1\n"); + return 0; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 663e46348ce3..386d62769ded 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1372,6 +1372,8 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)}, /* Telit FN912 series */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)}, /* Telit FN912 series */ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 567cb3faab70..ba59e92ab941 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2339,7 +2339,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel_key *pkey; struct ip_tunnel_key key; struct vxlan_dev *vxlan = netdev_priv(dev); - const struct iphdr *old_iph = ip_hdr(skb); + const struct iphdr *old_iph; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; unsigned int pkt_len = skb->len; @@ -2353,8 +2353,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, bool use_cache; bool udp_sum = false; bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); + bool no_eth_encap; __be32 vni = 0; + no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB); + if (!skb_vlan_inet_prepare(skb, no_eth_encap)) + goto drop; + + old_iph = ip_hdr(skb); + info = skb_tunnel_info(skb); use_cache = ip_tunnel_dst_cache_usable(skb, info); diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index a939fd89a7f5..92fc2d456c2c 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1566,13 +1566,6 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, wlvif->band)); - if (!cmd->supported_rates) { - wl1271_debug(DEBUG_CMD, - "peer has no supported rates yet, configuring basic rates: 0x%x", - wlvif->basic_rate_set); - cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set); - } - wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", cmd->supported_rates, sta->uapsd_queues); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ef12169f8044..492cd7aef44f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5139,19 +5139,23 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, /* Add station (AP mode) */ if (is_ap && - old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) { + old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { ret = wl12xx_sta_add(wl, wlvif, sta); if (ret) return ret; + wl_sta->fw_added = true; + wlcore_update_inconn_sta(wl, wlvif, wl_sta, true); } /* Remove station (AP mode) */ if (is_ap && - old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST) { + old_state == IEEE80211_STA_ASSOC && + new_state == 
IEEE80211_STA_AUTH) { + wl_sta->fw_added = false; + /* must not fail */ wl12xx_sta_remove(wl, wlvif, sta); @@ -5165,11 +5169,6 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, if (ret < 0) return ret; - /* reconfigure rates */ - ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid); - if (ret < 0) - return ret; - ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap, true, wl_sta->hlid); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 7bd3ce2f0804..464587d16ab2 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -140,11 +140,8 @@ EXPORT_SYMBOL(wl12xx_is_dummy_packet); static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct sk_buff *skb, struct ieee80211_sta *sta) { - if (sta) { - struct wl1271_station *wl_sta; - - wl_sta = (struct wl1271_station *)sta->drv_priv; - return wl_sta->hlid; + if (sta && wl1271_station(sta)->fw_added) { + return wl1271_station(sta)->hlid; } else { struct ieee80211_hdr *hdr; diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index eefae3f867b9..817a8a61cac6 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -324,6 +324,7 @@ struct wl12xx_rx_filter { struct wl1271_station { u8 hlid; + bool fw_added; bool in_connection; /* @@ -335,6 +336,11 @@ struct wl1271_station { u64 total_freed_pkts; }; +static inline struct wl1271_station *wl1271_station(struct ieee80211_sta *sta) +{ + return (struct wl1271_station *)sta->drv_priv; +} + struct wl12xx_vif { struct wl1271 *wl; struct list_head list; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 7f999e8a433d..7b00945f7191 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN, }; +static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET, + [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL, + [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1, + [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL, + + /* In PCS_USB */ + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR, + + [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL, + [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS, + [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS, + [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, + + [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS, + [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV, + + [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV, + [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL, + [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL, + [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN, + [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN, +}; + static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), @@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f), }; +static const struct 
qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f), +}; + static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40), QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30), @@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4), }; +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1), +}; + static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05), QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), @@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), }; +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 
0x01), +}; + +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), +}; + +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02), +}; + +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), +}; + static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31), @@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = { }; static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = { - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55), - 
QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10), }; static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = { @@ -1794,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = { .pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl), - .dp_serdes_tbl = qmp_v6_dp_serdes_tbl, - .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl), - .dp_tx_tbl = qmp_v6_dp_tx_tbl, - .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl), + .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl), + .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl), - .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr, - .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr), - .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr, - .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr), - .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2, - .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2), - .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3, - .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3), + .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v6_n4_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3), - .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr, - .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr, + .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr, + .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr, .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2, .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2, @@ -1822,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = { .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v45_usb3phy_regs_layout, + .regs = qmp_v6_n4_usb3phy_regs_layout, }; static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h new file mode 100644 index 000000000000..b3024714dab4 --- 
/dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_ +#define QCOM_PHY_QMP_PCS_V6_N4_H_ + +/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */ +#define QPHY_V6_N4_PCS_SW_RESET 0x000 +#define QPHY_V6_N4_PCS_PCS_STATUS1 0x014 +#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040 +#define QPHY_V6_N4_PCS_START_CONTROL 0x044 +#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090 +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4 +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8 +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8 +#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc +#define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190 +#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194 +#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198 +#define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0 +#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0 +#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4 +#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0 +#define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc +#define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0 +#define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h index a814ad11af07..d37cc0d4fd36 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h @@ -6,11 +6,24 @@ #ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_ #define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_ +#define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08 +#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c +#define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14 +#define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c +#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20 #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30 #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34 +#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48 +#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c +#define QSERDES_V6_N4_TX_TX_POL_INV 0x50 +#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54 #define QSERDES_V6_N4_TX_LANE_MODE_1 0x78 #define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c #define QSERDES_V6_N4_TX_LANE_MODE_3 0x80 +#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac +#define QSERDES_V6_N4_TX_TX_BAND 0xd8 +#define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4 +#define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0 #define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8 #define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index d10b8f653c4b..d0f41e4aaa85 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -46,6 +46,8 @@ #include "phy-qcom-qmp-pcs-v6.h" +#include "phy-qcom-qmp-pcs-v6-n4.h" + #include "phy-qcom-qmp-pcs-v6_20.h" #include "phy-qcom-qmp-pcs-v7.h" diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 7178a38475cc..27fd54795791 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -245,7 +245,7 @@ static const char * const irq_type_names[] = { }; static bool persist_gpio_outputs; -module_param(persist_gpio_outputs, bool, 0644); +module_param(persist_gpio_outputs, bool, 0444); MODULE_PARM_DESC(persist_gpio_outputs, "Enable GPIO_OUT persistence when pin is freed"); static inline u32 bcm2835_gpio_rd(struct bcm2835_pinctrl 
*pc, unsigned reg) diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index cffeb869130d..f424a57f0013 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1106,8 +1106,8 @@ static struct pinctrl *create_pinctrl(struct device *dev, * an -EPROBE_DEFER later, as that is the worst case. */ if (ret == -EPROBE_DEFER) { - pinctrl_free(p, false); mutex_unlock(&pinctrl_maps_mutex); + pinctrl_free(p, false); return ERR_PTR(ret); } } diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 3bedf36a0019..3f56991f5b89 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = { static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = { { - .num = 2, - .pin = 12, - .reg = 0x24, - .bit = 8, - .mask = 0x3 - }, { + /* gpio2_b7_sel */ .num = 2, .pin = 15, .reg = 0x28, .bit = 0, .mask = 0x7 }, { + /* gpio2_c7_sel */ .num = 2, .pin = 23, .reg = 0x30, .bit = 14, .mask = 0x3 + }, { + /* gpio3_b1_sel */ + .num = 3, + .pin = 9, + .reg = 0x44, + .bit = 2, + .mask = 0x3 + }, { + /* gpio3_b2_sel */ + .num = 3, + .pin = 10, + .reg = 0x44, + .bit = 4, + .mask = 0x3 + }, { + /* gpio3_b3_sel */ + .num = 3, + .pin = 11, + .reg = 0x44, + .bit = 6, + .mask = 0x3 + }, { + /* gpio3_b4_sel */ + .num = 3, + .pin = 12, + .reg = 0x44, + .bit = 8, + .mask = 0x3 + }, { + /* gpio3_b5_sel */ + .num = 3, + .pin = 13, + .reg = 0x44, + .bit = 10, + .mask = 0x3 + }, { + /* gpio3_b6_sel */ + .num = 3, + .pin = 14, + .reg = 0x44, + .bit = 12, + .mask = 0x3 + }, { + /* gpio3_b7_sel */ + .num = 3, + .pin = 15, + .reg = 0x44, + .bit = 14, + .mask = 0x3 }, }; @@ -2433,6 +2478,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) case RK3188: case RK3288: case RK3308: + case RK3328: case RK3368: case RK3399: case RK3568: @@ -2491,6 +2537,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, case RK3188: case RK3288: case RK3308: + case RK3328: case RK3368: case RK3399: case RK3568: @@ -2704,8 +2751,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, if (ret) { /* revert the already done pin settings */ - for (cnt--; cnt >= 0; cnt--) + for (cnt--; cnt >= 0; cnt--) { + bank = pin_to_bank(info, pins[cnt]); rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0); + } return ret; } @@ -2753,6 +2802,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, case RK3188: case RK3288: case RK3308: + case RK3328: case RK3368: case RK3399: case RK3568: @@ -3763,7 +3813,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = { PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0), PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0), PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, - IOMUX_WIDTH_3BIT, + 0, IOMUX_WIDTH_3BIT, 0), PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", @@ -3777,7 +3827,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = { .pin_banks = rk3328_pin_banks, .nr_banks = ARRAY_SIZE(rk3328_pin_banks), .label = "RK3328-GPIO", - .type = RK3288, + .type = RK3328, .grf_mux_offset = 0x0, .iomux_recalced = rk3328_mux_recalced_data, .niomux_recalced = ARRAY_SIZE(rk3328_mux_recalced_data), diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h index 4759f336941e..849266f8b191 100644 --- a/drivers/pinctrl/pinctrl-rockchip.h +++ b/drivers/pinctrl/pinctrl-rockchip.h @@ -193,6 +193,7 @@ enum rockchip_pinctrl_type { RK3188, RK3288, RK3308, + RK3328, RK3368, RK3399, RK3568, 
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c index 085047320853..5e7c7cf93445 100644 --- a/drivers/pinctrl/pinctrl-tps6594.c +++ b/drivers/pinctrl/pinctrl-tps6594.c @@ -486,6 +486,7 @@ static int tps6594_pinctrl_probe(struct platform_device *pdev) break; case TPS6593: case TPS6594: + case LP8764: pctrl_desc->pins = tps6594_pins; pctrl_desc->npins = ARRAY_SIZE(tps6594_pins); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 4e80c7204e5f..4abd6f18bbef 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1207,7 +1207,6 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm7325-gpio", .data = (void *) 10 }, { .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8}, { .compatible = "qcom,pm8005-gpio", .data = (void *) 4 }, - { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 }, { .compatible = "qcom,pm8019-gpio", .data = (void *) 6 }, /* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */ { .compatible = "qcom,pm8150-gpio", .data = (void *) 10 }, diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index c3256bfde502..60be78da9f52 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -2071,11 +2071,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl) * This has to be atomically executed to protect against a concurrent * interrupt. */ - raw_spin_lock_irqsave(&pctrl->lock.rlock, flags); + spin_lock_irqsave(&pctrl->lock, flags); ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data)); if (!ret && !irqd_irq_disabled(data)) rzg2l_gpio_irq_enable(data); - raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags); + spin_unlock_irqrestore(&pctrl->lock, flags); if (ret) dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq); diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index a2f231d13a9f..8bae3fd2b330 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -321,22 +321,30 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch, * First we need to find the minimal value for prescaler such that * * period_ns * clkrate - * ------------------------------ + * ------------------------------ < max_arr + 1 * NSEC_PER_SEC * (prescaler + 1) * - * isn't bigger than max_arr. + * This equation is equivalent to + * + * period_ns * clkrate + * ---------------------------- < prescaler + 1 + * NSEC_PER_SEC * (max_arr + 1) + * + * Using integer division and knowing that the right hand side is + * integer, this is further equivalent to + * + * (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler */ prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk), - (u64)NSEC_PER_SEC * priv->max_arr); - if (prescaler > 0) - prescaler -= 1; - + (u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1)); if (prescaler > MAX_TIM_PSC) return -EINVAL; prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk), (u64)NSEC_PER_SEC * (prescaler + 1)); + if (!prd) + return -EINVAL; /* * All channels share the same prescaler and counter so when two @@ -673,7 +681,8 @@ static int stm32_pwm_probe(struct platform_device *pdev) * .apply() won't overflow. 
*/ if (clk_get_rate(priv->clk) > 1000000000) - return dev_err_probe(dev, -EINVAL, "Failed to lock clock\n"); + return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n", + clk_get_rate(priv->clk)); chip->ops = &stm32pwm_ops; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 34fcdd82b2ea..f3c447ecdc3b 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -140,7 +140,7 @@ #define AXP717_DCDC1_NUM_VOLTAGES 88 #define AXP717_DCDC2_NUM_VOLTAGES 107 -#define AXP717_DCDC3_NUM_VOLTAGES 104 +#define AXP717_DCDC3_NUM_VOLTAGES 103 #define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0) #define AXP717_LDO_V_OUT_MASK GENMASK(4, 0) @@ -763,10 +763,15 @@ static const struct linear_range axp717_dcdc1_ranges[] = { REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000), }; +/* + * The manual says that the last voltage is 3.4V, encoded as 0b1101011 (107), + * but every other method proves that this is wrong, so it's really 106 that + * programs the final 3.4V. + */ static const struct linear_range axp717_dcdc2_ranges[] = { REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000), REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000), - REGULATOR_LINEAR_RANGE(1600000, 88, 107, 100000), + REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000), }; static const struct linear_range axp717_dcdc3_ranges[] = { @@ -790,40 +795,40 @@ static const struct regulator_desc axp717_regulators[] = { AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100, AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK, AXP717_DCDC_OUTPUT_CONTROL, BIT(3)), - AXP_DESC(AXP717, ALDO1, "aldo1", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100, AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(0)), - AXP_DESC(AXP717, ALDO2, "aldo2", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, ALDO2, "aldo2", "aldoin", 500, 3500, 100, AXP717_ALDO2_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(1)), - AXP_DESC(AXP717, ALDO3, "aldo3", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, ALDO3, "aldo3", "aldoin", 500, 3500, 100, AXP717_ALDO3_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(2)), - AXP_DESC(AXP717, ALDO4, "aldo4", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, ALDO4, "aldo4", "aldoin", 500, 3500, 100, AXP717_ALDO4_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(3)), - AXP_DESC(AXP717, BLDO1, "bldo1", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, BLDO1, "bldo1", "bldoin", 500, 3500, 100, AXP717_BLDO1_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(4)), - AXP_DESC(AXP717, BLDO2, "bldo2", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, BLDO2, "bldo2", "bldoin", 500, 3500, 100, AXP717_BLDO2_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(5)), - AXP_DESC(AXP717, BLDO3, "bldo3", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, BLDO3, "bldo3", "bldoin", 500, 3500, 100, AXP717_BLDO3_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(6)), - AXP_DESC(AXP717, BLDO4, "bldo4", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, BLDO4, "bldo4", "bldoin", 500, 3500, 100, AXP717_BLDO4_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO0_OUTPUT_CONTROL, BIT(7)), - AXP_DESC(AXP717, CLDO1, "cldo1", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, CLDO1, "cldo1", "cldoin", 500, 3500, 100, AXP717_CLDO1_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO1_OUTPUT_CONTROL, BIT(0)), - AXP_DESC(AXP717, CLDO2, "cldo2", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, CLDO2, "cldo2", "cldoin", 500, 3500, 100, AXP717_CLDO2_CONTROL, 
AXP717_LDO_V_OUT_MASK, AXP717_LDO1_OUTPUT_CONTROL, BIT(1)), - AXP_DESC(AXP717, CLDO3, "cldo3", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, CLDO3, "cldo3", "cldoin", 500, 3500, 100, AXP717_CLDO3_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO1_OUTPUT_CONTROL, BIT(2)), - AXP_DESC(AXP717, CLDO4, "cldo4", "vin1", 500, 3500, 100, + AXP_DESC(AXP717, CLDO4, "cldo4", "cldoin", 500, 3500, 100, AXP717_CLDO4_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO1_OUTPUT_CONTROL, BIT(3)), AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50, diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c index 26192d55a685..79fbb45297f6 100644 --- a/drivers/regulator/bd71815-regulator.c +++ b/drivers/regulator/bd71815-regulator.c @@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np, * 10: 2.50mV/usec 10mV 4uS * 11: 1.25mV/usec 10mV 8uS */ -static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 }; +static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 }; static int bd7181x_led_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 5794f4e9dd52..844e9587a880 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator) return map ? map : ERR_PTR(-EOPNOTSUPP); } +EXPORT_SYMBOL_GPL(regulator_get_regmap); /** * regulator_get_hardware_vsel_register - get the HW voltage selector register diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c index 4a859f4c0f83..ac53792e3fed 100644 --- a/drivers/regulator/tps6594-regulator.c +++ b/drivers/regulator/tps6594-regulator.c @@ -653,18 +653,14 @@ static int tps6594_regulator_probe(struct platform_device *pdev) } } - if (tps->chip_id == LP8764) { - nr_buck = ARRAY_SIZE(buck_regs); - nr_ldo = 0; - nr_types = REGS_INT_NB; - } else if (tps->chip_id == TPS65224) { + if (tps->chip_id == TPS65224) { nr_buck = ARRAY_SIZE(tps65224_buck_regs); nr_ldo = ARRAY_SIZE(tps65224_ldo_regs); - nr_types = REGS_INT_NB; + nr_types = TPS65224_REGS_INT_NB; } else { nr_buck = ARRAY_SIZE(buck_regs); - nr_ldo = ARRAY_SIZE(tps6594_ldo_regs); - nr_types = TPS65224_REGS_INT_NB; + nr_ldo = (tps->chip_id == LP8764) ? 
0 : ARRAY_SIZE(tps6594_ldo_regs); + nr_types = REGS_INT_NB; } reg_irq_nb = nr_types * (nr_buck + nr_ldo); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 37dd6ead72a4..6b64af7d4927 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -63,6 +63,7 @@ #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_devinfo.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> @@ -3118,6 +3119,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer) struct scsi_mode_data data; int res; + if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS) + return; + res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a, /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT, sdkp->max_retries, &data, &sshdr); diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index 20d94bcfc9b4..795e223f7e5c 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -571,6 +571,9 @@ static int sdw_master_read_amd_prop(struct sdw_bus *bus) amd_manager->wake_en_mask = wake_en_mask; fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask); amd_manager->power_mode_mask = power_mode_mask; + + fwnode_handle_put(link); + return 0; } diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index 17cf27e6ea73..18517121cc89 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -155,8 +155,10 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus) SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY; intel_prop = devm_kzalloc(bus->dev, sizeof(*intel_prop), GFP_KERNEL); - if (!intel_prop) + if (!intel_prop) { + fwnode_handle_put(link); return -ENOMEM; + } /* initialize with hardware defaults, in case the properties are not found */ intel_prop->doaise = 0x1; @@ -184,6 +186,8 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus) intel_prop->dodse, intel_prop->dods); + fwnode_handle_put(link); + return 0; } diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c index 55a9c51c84c1..e5d9df26d4dc 100644 --- a/drivers/soundwire/mipi_disco.c +++ b/drivers/soundwire/mipi_disco.c @@ -66,8 +66,10 @@ int sdw_master_read_prop(struct sdw_bus *bus) prop->clk_freq = devm_kcalloc(bus->dev, prop->num_clk_freq, sizeof(*prop->clk_freq), GFP_KERNEL); - if (!prop->clk_freq) + if (!prop->clk_freq) { + fwnode_handle_put(link); return -ENOMEM; + } fwnode_property_read_u32_array(link, "mipi-sdw-clock-frequencies-supported", @@ -92,8 +94,10 @@ int sdw_master_read_prop(struct sdw_bus *bus) prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears, sizeof(*prop->clk_gears), GFP_KERNEL); - if (!prop->clk_gears) + if (!prop->clk_gears) { + fwnode_handle_put(link); return -ENOMEM; + } fwnode_property_read_u32_array(link, "mipi-sdw-supported-clock-gears", @@ -116,6 +120,8 @@ int sdw_master_read_prop(struct sdw_bus *bus) fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold", &prop->err_threshold); + fwnode_handle_put(link); + return 0; } EXPORT_SYMBOL(sdw_master_read_prop); @@ -197,8 +203,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, dpn[i].num_words, sizeof(*dpn[i].words), GFP_KERNEL); - if (!dpn[i].words) + if (!dpn[i].words) { + fwnode_handle_put(node); return -ENOMEM; + } fwnode_property_read_u32_array(node, "mipi-sdw-port-wordlength-configs", @@ -236,8 +244,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, dpn[i].num_channels, sizeof(*dpn[i].channels), GFP_KERNEL); - 
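The soundwire hunks above (amd_manager.c, intel_auxdevice.c and mipi_disco.c) all fix the same class of leak: the fwnode returned by the link or port lookup was never released on early returns. A generic sketch of the pattern, with made-up device and property names purely for illustration, might look like this:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

/* Sketch only: every early return after the fwnode lookup must drop the
 * reference the lookup took.  Names here are illustrative, not the actual
 * SoundWire code. */
static int read_link_props(struct device *dev)
{
	struct fwnode_handle *link;
	u32 *freqs;
	int nr;

	link = device_get_named_child_node(dev, "example-link-0"); /* takes a reference */
	if (!link)
		return -ENODEV;

	nr = fwnode_property_count_u32(link, "example-clock-frequencies");
	if (nr <= 0) {
		fwnode_handle_put(link);	/* drop it on the error path */
		return -EINVAL;
	}

	freqs = devm_kcalloc(dev, nr, sizeof(*freqs), GFP_KERNEL);
	if (!freqs) {
		fwnode_handle_put(link);	/* the branch previously leaked */
		return -ENOMEM;
	}

	fwnode_property_read_u32_array(link, "example-clock-frequencies", freqs, nr);

	fwnode_handle_put(link);		/* and on the success path */
	return 0;
}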
if (!dpn[i].channels) + if (!dpn[i].channels) { + fwnode_handle_put(node); return -ENOMEM; + } fwnode_property_read_u32_array(node, "mipi-sdw-channel-number-list", @@ -251,8 +261,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, dpn[i].num_ch_combinations, sizeof(*dpn[i].ch_combinations), GFP_KERNEL); - if (!dpn[i].ch_combinations) + if (!dpn[i].ch_combinations) { + fwnode_handle_put(node); return -ENOMEM; + } fwnode_property_read_u32_array(node, "mipi-sdw-channel-combination-list", @@ -274,6 +286,8 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, /* TODO: Read audio mode */ + fwnode_handle_put(node); + i++; } @@ -348,10 +362,14 @@ int sdw_slave_read_prop(struct sdw_slave *slave) prop->dp0_prop = devm_kzalloc(&slave->dev, sizeof(*prop->dp0_prop), GFP_KERNEL); - if (!prop->dp0_prop) + if (!prop->dp0_prop) { + fwnode_handle_put(port); return -ENOMEM; + } sdw_slave_read_dp0(slave, port, prop->dp0_prop); + + fwnode_handle_put(port); } /* diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index 9d747ea69926..8b618ef0f711 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -26,7 +26,7 @@ #include <linux/units.h> #define CS42L43_FIFO_SIZE 16 -#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ) +#define CS42L43_SPI_ROOT_HZ 49152000 #define CS42L43_SPI_MAX_LENGTH 65532 enum cs42l43_spi_cmd { @@ -54,7 +54,7 @@ static const struct software_node ampr = { static struct spi_board_info ampl_info = { .modalias = "cs35l56", - .max_speed_hz = 20 * HZ_PER_MHZ, + .max_speed_hz = 11 * HZ_PER_MHZ, .chip_select = 0, .mode = SPI_MODE_0, .swnode = &l, @@ -62,7 +62,7 @@ static struct spi_board_info ampl_info = { static struct spi_board_info ampr_info = { .modalias = "cs35l56", - .max_speed_hz = 20 * HZ_PER_MHZ, + .max_speed_hz = 11 * HZ_PER_MHZ, .chip_select = 1, .mode = SPI_MODE_0, .swnode = &r, diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index f4006c82f867..33164ebdb583 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, ctrl |= (spi_imx->target_burst * 8 - 1) << MX51_ECSPI_CTRL_BL_OFFSET; else { - if (spi_imx->usedma) { - ctrl |= (spi_imx->bits_per_word - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; - } else { - if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) - ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; - else - ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word, - BITS_PER_BYTE) * spi_imx->bits_per_word - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; - } + ctrl |= (spi_imx->bits_per_word - 1) + << MX51_ECSPI_CTRL_BL_OFFSET; } /* set clock speed */ diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index f1e922fd362a..955c920c4b63 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi) static int stm32_qspi_get_mode(u8 buswidth) { - if (buswidth == 4) + if (buswidth >= 4) return CCR_BUSWIDTH_4; return buswidth; @@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi) return -EINVAL; mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL); - if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) || - ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) && - gpiod_count(qspi->dev, "cs") == -ENOENT)) { + if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) { dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n"); dev_err(qspi->dev, "configuration not supported\n"); @@ -676,10 
+674,10 @@ static int stm32_qspi_setup(struct spi_device *spi) qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN; /* - * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL - * are both set in spi->mode and "cs-gpios" properties is found in DT + * Dual flash mode is only enable in case SPI_TX_OCTAL or SPI_RX_OCTAL + * is set in spi->mode and "cs-gpios" properties is found in DT */ - if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) { + if (mode) { qspi->cr_reg |= CR_DFM; dev_dbg(qspi->dev, "Dual flash mode enable"); } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 9bc9fd10d538..fc13fa192189 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -689,10 +689,12 @@ static int __spi_add_device(struct spi_device *spi) * Make sure that multiple logical CS doesn't map to the same physical CS. * For example, spi->chip_select[0] != spi->chip_select[1] and so on. */ - for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { - status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); - if (status) - return status; + if (!spi_controller_is_target(ctlr)) { + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); + if (status) + return status; + } } /* Set the bus ID string */ @@ -4156,7 +4158,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) return -EINVAL; if (xfer->tx_nbits != SPI_NBITS_SINGLE && xfer->tx_nbits != SPI_NBITS_DUAL && - xfer->tx_nbits != SPI_NBITS_QUAD) + xfer->tx_nbits != SPI_NBITS_QUAD && + xfer->tx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_DUAL) && !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) @@ -4171,7 +4174,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) return -EINVAL; if (xfer->rx_nbits != SPI_NBITS_SINGLE && xfer->rx_nbits != SPI_NBITS_DUAL && - xfer->rx_nbits != SPI_NBITS_QUAD) + xfer->rx_nbits != SPI_NBITS_QUAD && + xfer->rx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_DUAL) && !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index 14e34eabc419..4a1bfebb1b8e 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -150,7 +150,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid) { struct proc_thermal_pci *pci_info = devid; struct proc_thermal_device *proc_priv; - int ret = IRQ_HANDLED; + int ret = IRQ_NONE; u32 status; proc_priv = pci_info->proc_priv; @@ -175,6 +175,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid) /* Disable enable interrupt flag */ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); pkg_thermal_schedule_work(&pci_info->work); + ret = IRQ_HANDLED; } pci_write_config_byte(pci_info->pdev, 0xdc, 0x01); diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 0bb3a495b56e..819ed0110f3e 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -769,7 +769,11 @@ static int lvts_golden_temp_init(struct device *dev, u8 *calib, */ gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff; - if (gt && gt < LVTS_GOLDEN_TEMP_MAX) + /* A zero value for gt means that device has invalid efuse data */ + if (!gt) + return -ENODATA; + + if (gt < LVTS_GOLDEN_TEMP_MAX) golden_temp 
= gt; golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset; @@ -1458,7 +1462,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = { }, VALID_SENSOR_MAP(1, 1, 1, 1), .offset = 0x0, - .mode = LVTS_MSR_FILTERED_MODE, }, { .lvts_sensor = { @@ -1469,7 +1472,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = { }, VALID_SENSOR_MAP(1, 1, 0, 0), .offset = 0x100, - .mode = LVTS_MSR_FILTERED_MODE, } }; @@ -1483,7 +1485,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = { }, VALID_SENSOR_MAP(0, 1, 0, 0), .offset = 0x0, - .mode = LVTS_MSR_FILTERED_MODE, }, { .lvts_sensor = { @@ -1496,7 +1497,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = { }, VALID_SENSOR_MAP(1, 1, 1, 0), .offset = 0x100, - .mode = LVTS_MSR_FILTERED_MODE, }, { .lvts_sensor = { @@ -1507,7 +1507,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = { }, VALID_SENSOR_MAP(1, 1, 0, 0), .offset = 0x200, - .mode = LVTS_MSR_FILTERED_MODE, }, { .lvts_sensor = { @@ -1518,7 +1517,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = { }, VALID_SENSOR_MAP(1, 1, 0, 0), .offset = 0x300, - .mode = LVTS_MSR_FILTERED_MODE, } }; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index d70e76dd3c94..1b0ab2790860 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1406,6 +1406,7 @@ thermal_zone_device_register_with_trips(const char *type, ida_init(&tz->ida); mutex_init(&tz->lock); init_completion(&tz->removal); + init_completion(&tz->resume); id = ida_alloc(&thermal_tz_ida, GFP_KERNEL); if (id < 0) { result = id; @@ -1651,6 +1652,9 @@ static void thermal_zone_device_resume(struct work_struct *work) thermal_zone_device_init(tz); __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + complete(&tz->resume); + tz->resuming = false; + mutex_unlock(&tz->lock); } @@ -1668,6 +1672,20 @@ static int thermal_pm_notify(struct notifier_block *nb, list_for_each_entry(tz, &thermal_tz_list, node) { mutex_lock(&tz->lock); + if (tz->resuming) { + /* + * thermal_zone_device_resume() queued up for + * this zone has not acquired the lock yet, so + * release it to let the function run and wait + * util it has done the work. + */ + mutex_unlock(&tz->lock); + + wait_for_completion(&tz->resume); + + mutex_lock(&tz->lock); + } + tz->suspended = true; mutex_unlock(&tz->lock); @@ -1685,6 +1703,9 @@ static int thermal_pm_notify(struct notifier_block *nb, cancel_delayed_work(&tz->poll_queue); + reinit_completion(&tz->resume); + tz->resuming = true; + /* * Replace the work function with the resume one, which * will restore the original work function and schedule @@ -1709,6 +1730,12 @@ static int thermal_pm_notify(struct notifier_block *nb, static struct notifier_block thermal_pm_nb = { .notifier_call = thermal_pm_notify, + /* + * Run at the lowest priority to avoid interference between the thermal + * zone resume work items spawned by thermal_pm_notify() and the other + * PM notifiers. 
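The thermal_core.c changes above close a race between the PM suspend notifier and a still-pending resume work item by pairing a "resuming" flag with a completion. A simplified, illustrative-only model of that handshake follows; the struct and function names are invented for the sketch, and only the lock/completion choreography mirrors the patch.

#include <linux/mutex.h>
#include <linux/completion.h>

struct zone {
	struct mutex lock;		/* mutex_init() at creation time */
	struct completion resume;	/* init_completion() at creation time */
	bool resuming;
	bool suspended;
};

static void zone_resume_work(struct zone *tz)
{
	mutex_lock(&tz->lock);
	/* ... re-initialise and update the zone ... */
	complete(&tz->resume);		/* let a waiting suspend proceed */
	tz->resuming = false;
	mutex_unlock(&tz->lock);
}

static void zone_suspend(struct zone *tz)
{
	mutex_lock(&tz->lock);
	if (tz->resuming) {
		/* Resume work is queued but has not taken the lock yet:
		 * drop the lock so it can run, then wait for it. */
		mutex_unlock(&tz->lock);
		wait_for_completion(&tz->resume);
		mutex_lock(&tz->lock);
	}
	tz->suspended = true;
	mutex_unlock(&tz->lock);
}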
+ */ + .priority = INT_MIN, }; static int __init thermal_init(void) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 20e7b45673d6..66f67e54e0c8 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -55,6 +55,7 @@ struct thermal_governor { * @type: the thermal zone device type * @device: &struct device for this thermal zone * @removal: removal completion + * @resume: resume completion * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis @@ -89,6 +90,7 @@ struct thermal_governor { * @poll_queue: delayed work for polling * @notify_event: Last notification event * @suspended: thermal zone suspend indicator + * @resuming: indicates whether or not thermal zone resume is in progress * @trips: array of struct thermal_trip objects */ struct thermal_zone_device { @@ -96,6 +98,7 @@ struct thermal_zone_device { char type[THERMAL_NAME_LENGTH]; struct device device; struct completion removal; + struct completion resume; struct attribute_group trips_attribute_group; struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; @@ -123,6 +126,7 @@ struct thermal_zone_device { struct delayed_work poll_queue; enum thermal_notify_event notify_event; bool suspended; + bool resuming; #ifdef CONFIG_THERMAL_DEBUGFS struct thermal_debugfs *debugfs; #endif diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index e5e9da61f15d..1b65e6ae4137 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -8787,6 +8787,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ ufshcd_device_reset(hba); + ufs_put_device_desc(hba); ufshcd_hba_stop(hba); ufshcd_vops_reinit_notify(hba); ret = ufshcd_hba_enable(hba); diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index b31464740f6c..8c8b5e6041cc 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -79,6 +79,12 @@ static int slave_alloc (struct scsi_device *sdev) if (us->protocol == USB_PR_BULK && us->max_lun > 0) sdev->sdev_bflags |= BLIST_FORCELUN; + /* + * Some USB storage devices reset if the IO advice hints grouping mode + * page is queried. Hence skip that mode page. + */ + sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; + return 0; } diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index a48870a87a29..b610a2de4ae5 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -21,6 +21,7 @@ #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> +#include <scsi/scsi_devinfo.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> @@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev) struct uas_dev_info *devinfo = (struct uas_dev_info *)sdev->host->hostdata; + /* + * Some USB storage devices reset if the IO advice hints grouping mode + * page is queried. Hence skip that mode page. + */ + sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; + sdev->hostdata = devinfo; return 0; } |
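Finally, the sd.c, scsiglue.c and uas.c hunks in this set form one handshake: the USB storage transports flag devices that cannot tolerate the IO advice hints mode page, and sd honours that flag before issuing the MODE SENSE. Condensed into a sketch (the function names are invented and the bodies trimmed to the relevant lines):

#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>

/* Transport side (usb-storage/uas): opt the device out at allocation time. */
static int example_slave_alloc(struct scsi_device *sdev)
{
	sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
	return 0;
}

/* Disk side (sd.c): honour the flag before querying mode page 0x0a, subpage 0x05. */
static void example_read_io_hints(struct scsi_device *sdp)
{
	if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
		return;	/* device is known to reset when this page is queried */
	/* ... scsi_mode_sense(sdp, 0x8, 0x0a, 0x05, ...) as in sd_read_io_hints() ... */
}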