author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2024-02-19 09:13:29 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2024-02-19 09:13:29 +0100
commit     a09ebb32afbe2b215075a81aa3f78795d553b91f (patch)
tree       047a0abd5a122d919d2558acb1d484f7a46a1efc /drivers
parent     cf92b9df3dcf45eab0fe3fcf9e97ce55c31c48c0 (diff)
parent     b401b621758e46812da61fa58a67c3fd8d91de0d (diff)
Merge 6.8-rc5 into usb-next
We need the USB fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
343 files changed, 2362 insertions, 1372 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 9418c73ee8ef..4b0640226986 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -480,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev) /* Clear any pending errors */ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); - /* VPU 37XX does not require 10m D3hot delay */ - if (ivpu_hw_gen(vdev) == IVPU_HW_37XX) - pdev->d3hot_delay = 0; + /* NPU does not require 10m D3hot delay */ + pdev->d3hot_delay = 0; ret = pcim_enable_device(pdev); if (ret) { diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 6576232f3e67..5fa8bd4603d5 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev) const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data; if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) || - (ivpu_hw_gen(vdev) > IVPU_HW_37XX) || (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE)) vdev->wa.disable_d0i3_msg = true; diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index f15a93d83057..89af1006df55 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -510,22 +510,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) return ret; } -static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev) -{ - ivpu_boot_dpu_active_drive(vdev, false); - ivpu_boot_pwr_island_isolation_drive(vdev, true); - ivpu_boot_pwr_island_trickle_drive(vdev, false); - ivpu_boot_pwr_island_drive(vdev, false); - - return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0); -} - static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) { u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val); - val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); + val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val); @@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev) return 0; } +static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev) +{ + int ret; + u32 val; + + if (IVPU_WA(punit_disabled)) + return 0; + + ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); + return ret; + } + + val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET); + val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val); + REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val); + + ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) + ivpu_err(vdev, "Timed out waiting for RESET completion\n"); + + return ret; +} + static int ivpu_hw_37xx_reset(struct ivpu_device *vdev) { int ret = 0; - if (ivpu_boot_pwr_domain_disable(vdev)) { - ivpu_err(vdev, "Failed to disable power domain\n"); + if (ivpu_hw_37xx_ip_reset(vdev)) { + ivpu_err(vdev, "Failed to reset NPU\n"); ret = -EIO; } @@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev) { int ret; + /* PLL requests may fail when powering down, so issue WP 0 here */ + ret = ivpu_pll_disable(vdev); + if (ret) + ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret); + ret = ivpu_hw_37xx_d0i3_disable(vdev); if (ret) ivpu_warn(vdev, "Failed to 
disable D0I3: %d\n", ret); diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index 704288084f37..1c995307c113 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); - val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); @@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) { struct ivpu_hw_info *hw = vdev->hw; u32 tile_disable; - u32 tile_enable; u32 fuse; fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE); @@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) else ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); - tile_enable = (~tile_disable) & TILE_MAX_MASK; - - hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku); - hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku); hw->tile_fuse = tile_disable; hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 0440bee3ecaf..e70cfb859339 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -294,7 +294,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 return -ENOENT; if (job->file_priv->has_mmu_faults) - job_status = VPU_JSM_STATUS_ABORTED; + job_status = DRM_IVPU_JOB_STATUS_ABORTED; job->bos[CMD_BUF_IDX]->job_status = job_status; dma_fence_signal(job->done_fence); @@ -315,7 +315,7 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev) unsigned long id; xa_for_each(&vdev->submitted_jobs_xa, id, job) - ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED); + ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED); } static int ivpu_job_submit(struct ivpu_job *job) diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 9a3122ffce03..91bd640655ab 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -72,10 +72,10 @@ #define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */ #define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2) -#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1) -#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1) -#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) +#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0) +#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) #define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK) +#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT) #define IVPU_MMU_CMDQ_CMD_SIZE 16 #define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE) @@ -475,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev) return 0; } +static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q) +{ + return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && + (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons))); +} + +static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q) +{ + return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && + (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons))); +} + static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, 
const char *name, u64 data0, u64 data1) { - struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; - u64 *queue_buffer = q->base; - int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); + struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; + u64 *queue_buffer = cmdq->base; + int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); - if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) { + if (ivpu_mmu_queue_is_full(cmdq)) { ivpu_err(vdev, "Failed to write MMU CMD %s\n", name); return -EBUSY; } queue_buffer[idx] = data0; queue_buffer[idx + 1] = data1; - q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK; + cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1); @@ -560,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) mmu->cmdq.cons = 0; memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); - clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE); mmu->evtq.prod = 0; mmu->evtq.cons = 0; @@ -874,14 +885,10 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev) u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC); - if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) + if (ivpu_mmu_queue_is_empty(evtq)) return NULL; - clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE); - evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; - REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons); - return evt; } @@ -902,6 +909,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev) } ivpu_mmu_user_context_mark_invalid(vdev, ssid); + REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); } } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index f501f27ebafd..5f73854234ba 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -58,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev) { int ret; + /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */ + pci_save_state(to_pci_dev(vdev->drm.dev)); + ret = ivpu_shutdown(vdev); - if (ret) { + if (ret) ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret); - return ret; - } + + pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); return ret; } @@ -71,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev) { int ret; + pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0); + pci_restore_state(to_pci_dev(vdev->drm.dev)); + retry: ret = ivpu_hw_power_up(vdev); if (ret) { @@ -120,15 +126,20 @@ static void ivpu_pm_recovery_work(struct work_struct *work) ivpu_fw_log_dump(vdev); -retry: - ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev)); - if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) { - cond_resched(); - goto retry; - } + atomic_inc(&vdev->pm->reset_counter); + atomic_set(&vdev->pm->reset_pending, 1); + down_write(&vdev->pm->reset_lock); + + ivpu_suspend(vdev); + ivpu_pm_prepare_cold_boot(vdev); + ivpu_jobs_abort_all(vdev); + + ret = ivpu_resume(vdev); + if (ret) + ivpu_err(vdev, "Failed to resume NPU: %d\n", ret); - if (ret && ret != -EAGAIN) - ivpu_err(vdev, "Failed to reset VPU: %d\n", ret); + up_write(&vdev->pm->reset_lock); + atomic_set(&vdev->pm->reset_pending, 0); kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); pm_runtime_mark_last_busy(vdev->drm.dev); @@ -200,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev) ivpu_suspend(vdev); ivpu_pm_prepare_warm_boot(vdev); - 
pci_save_state(to_pci_dev(dev)); - pci_set_power_state(to_pci_dev(dev), PCI_D3hot); - ivpu_dbg(vdev, PM, "Suspend done.\n"); return 0; @@ -216,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev) ivpu_dbg(vdev, PM, "Resume..\n"); - pci_set_power_state(to_pci_dev(dev), PCI_D0); - pci_restore_state(to_pci_dev(dev)); - ret = ivpu_resume(vdev); if (ret) ivpu_err(vdev, "Failed to resume: %d\n", ret); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 7b7c605166e0..fe825a432c5b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -680,32 +680,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, static DECLARE_RWSEM(cxl_cper_rw_sem); static cxl_cper_callback cper_callback; -/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */ - -/* - * General Media Event Record - * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 - */ -#define CPER_SEC_CXL_GEN_MEDIA_GUID \ - GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \ - 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6) - -/* - * DRAM Event Record - * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 - */ -#define CPER_SEC_CXL_DRAM_GUID \ - GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \ - 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24) - -/* - * Memory Module Event Record - * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 - */ -#define CPER_SEC_CXL_MEM_MODULE_GUID \ - GUID_INIT(0xfe927475, 0xdd59, 0x4339, \ - 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74) - static void cxl_cper_post_event(enum cxl_event_type event_type, struct cxl_cper_event_rec *rec) { diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index e327a0229dc1..e7f713cd70d3 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card) vc->scq = alloc_scq(card, vc->class); if (!vc->scq) { printk("%s: can't get SCQ.\n", card->name); + kfree(card->vcs[0]); + card->vcs[0] = NULL; return -ENOMEM; } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 018ac202de34..024b78a0cfc1 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb, struct cpufreq_policy *policy = data; int cpu; - if (!raw_capacity) - return 0; - if (val != CPUFREQ_CREATE_POLICY) return 0; @@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb, } if (cpumask_empty(cpus_to_visit)) { - topology_normalize_cpu_scale(); - schedule_work(&update_topology_flags_work); - free_raw_capacity(); + if (raw_capacity) { + topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); + free_raw_capacity(); + } pr_debug("cpu_capacity: parsing done\n"); schedule_work(&parsing_done_work); } @@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void) * On ACPI-based systems skip registering cpufreq notifier as cpufreq * information is not needed for cpu capacity initialization. 
*/ - if (!acpi_disabled || !raw_capacity) + if (!acpi_disabled) return -EINVAL; if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) diff --git a/drivers/base/core.c b/drivers/base/core.c index 14d46af40f9a..9828da9b933c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -125,7 +125,7 @@ static void __fwnode_link_del(struct fwnode_link *link) */ static void __fwnode_link_cycle(struct fwnode_link *link) { - pr_debug("%pfwf: Relaxing link with %pfwf\n", + pr_debug("%pfwf: cycle: depends on %pfwf\n", link->consumer, link->supplier); link->flags |= FWLINK_FLAG_CYCLE; } @@ -284,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target) return false; } +#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \ + DL_FLAG_CYCLE | \ + DL_FLAG_MANAGED) static inline bool device_link_flag_is_sync_state_only(u32 flags) { - return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) == - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED); + return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY; } /** @@ -1943,6 +1945,7 @@ static bool __fw_devlink_relax_cycles(struct device *con, /* Termination condition. */ if (sup_dev == con) { + pr_debug("----- cycle: start -----\n"); ret = true; goto out; } @@ -1974,8 +1977,11 @@ static bool __fw_devlink_relax_cycles(struct device *con, else par_dev = fwnode_get_next_parent_dev(sup_handle); - if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) + if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) { + pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle, + par_dev->fwnode); ret = true; + } if (!sup_dev) goto out; @@ -1991,6 +1997,8 @@ static bool __fw_devlink_relax_cycles(struct device *con, if (__fw_devlink_relax_cycles(con, dev_link->supplier->fwnode)) { + pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle, + dev_link->supplier->fwnode); fw_devlink_relax_link(dev_link); dev_link->flags |= DL_FLAG_CYCLE; ret = true; @@ -2058,13 +2066,19 @@ static int fw_devlink_create_devlink(struct device *con, /* * SYNC_STATE_ONLY device links don't block probing and supports cycles. - * So cycle detection isn't necessary and shouldn't be done. + * So, one might expect that cycle detection isn't necessary for them. + * However, if the device link was marked as SYNC_STATE_ONLY because + * it's part of a cycle, then we still need to do cycle detection. This + * is because the consumer and supplier might be part of multiple cycles + * and we need to detect all those cycles. */ - if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) { + if (!device_link_flag_is_sync_state_only(flags) || + flags & DL_FLAG_CYCLE) { device_links_write_lock(); if (__fw_devlink_relax_cycles(con, sup_handle)) { __fwnode_link_cycle(link); flags = fw_devlink_get_flags(link->flags); + pr_debug("----- cycle: end -----\n"); dev_info(con, "Fixed dependency cycle(s) with %pfwf\n", sup_handle); } diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index 026bdcb45127..0d957c5f1bcc 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -9,6 +9,23 @@ #define BLOCK_TEST_SIZE 12 +static void get_changed_bytes(void *orig, void *new, size_t size) +{ + char *o = orig; + char *n = new; + int i; + + get_random_bytes(new, size); + + /* + * This could be nicer and more efficient but we shouldn't + * super care. 
+ */ + for (i = 0; i < size; i++) + while (n[i] == o[i]) + get_random_bytes(&n[i], 1); +} + static const struct regmap_config test_regmap_config = { .max_register = BLOCK_TEST_SIZE, .reg_stride = 1, @@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test) struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; - unsigned int val, val_test, val_last; + unsigned int val; + u16 val_test, val_last; u16 val_array[BLOCK_TEST_SIZE]; config = raw_regmap_config; @@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test) struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; - u16 val[2]; + u16 val[3]; u16 *hw_buf; unsigned int rval; int i; @@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test) hw_buf = (u16 *)data->vals; - get_random_bytes(&val, sizeof(val)); + get_changed_bytes(&hw_buf[2], &val[0], sizeof(val)); /* Do a regular write and a raw write in cache only mode */ regcache_cache_only(map, true); - KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val))); - if (config.val_format_endian == REGMAP_ENDIAN_BIG) - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, - be16_to_cpu(val[0]))); - else - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, - le16_to_cpu(val[0]))); + KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, + sizeof(u16) * 2)); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2])); /* We should read back the new values, and defaults for the rest */ for (i = 0; i < config.max_register + 1; i++) { @@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test) switch (i) { case 2: case 3: - case 6: if (config.val_format_endian == REGMAP_ENDIAN_BIG) { KUNIT_EXPECT_EQ(test, rval, - be16_to_cpu(val[i % 2])); + be16_to_cpu(val[i - 2])); } else { KUNIT_EXPECT_EQ(test, rval, - le16_to_cpu(val[i % 2])); + le16_to_cpu(val[i - 2])); } break; + case 4: + KUNIT_EXPECT_EQ(test, rval, val[i - 2]); + break; default: KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); break; } } + + /* + * The value written via _write() was translated by the core, + * translate the original copy for comparison purposes. + */ + if (config.val_format_endian == REGMAP_ENDIAN_BIG) + val[2] = cpu_to_be16(val[2]); + else + val[2] = cpu_to_le16(val[2]); /* The values should not appear in the "hardware" */ - KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val)); - KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16)); + KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val)); for (i = 0; i < config.max_register + 1; i++) data->written[i] = false; @@ -1312,8 +1336,7 @@ static void raw_sync(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); /* The values should now appear in the "hardware" */ - KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); - KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16)); + KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val)); regmap_exit(map); } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5bf98fd6a651..2bf14a0e2815 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1593,14 +1593,15 @@ static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; + /* Ensure no requests in virtqueues before deleting vqs. */ + blk_mq_freeze_queue(vblk->disk->queue); + /* Ensure we don't receive any more interrupts */ virtio_reset_device(vdev); /* Make sure no work handler is accessing the device. 
*/ flush_work(&vblk->config_work); - blk_mq_quiesce_queue(vblk->disk->queue); - vdev->config->del_vqs(vdev); kfree(vblk->vqs); @@ -1618,7 +1619,7 @@ static int virtblk_restore(struct virtio_device *vdev) virtio_device_ready(vdev); - blk_mq_unquiesce_queue(vblk->disk->queue); + blk_mq_unfreeze_queue(vblk->disk->queue); return 0; } #endif diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 3d5e6d705fc6..44b19e696176 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg) filter_data[1] = 0; } - if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, - cn_filter, (void *)filter_data) == -ESRCH) - atomic_set(&proc_event_num_listeners, 0); + cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, + cn_filter, (void *)filter_data); local_unlock(&local_event.lock); } diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e4d3f45242f6..b04bc1d3d627 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -534,10 +534,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init); static int __sev_platform_shutdown_locked(int *error) { - struct sev_device *sev = psp_master->sev_data; + struct psp_device *psp = psp_master; + struct sev_device *sev; int ret; - if (!sev || sev->state == SEV_STATE_UNINIT) + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + + if (sev->state == SEV_STATE_UNINIT) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 89445435303a..bdf117a33744 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -338,7 +338,7 @@ TRACE_EVENT(cxl_general_media, TP_fast_assign( CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); - memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t)); + __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID; /* General Media */ __entry->dpa = le64_to_cpu(rec->phys_addr); @@ -425,7 +425,7 @@ TRACE_EVENT(cxl_dram, TP_fast_assign( CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); - memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t)); + __entry->hdr_uuid = CXL_EVENT_DRAM_UUID; /* DRAM */ __entry->dpa = le64_to_cpu(rec->phys_addr); @@ -573,7 +573,7 @@ TRACE_EVENT(cxl_memory_module, TP_fast_assign( CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); - memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t)); + __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID; /* Memory Module Event */ __entry->event_type = rec->event_type; diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index 314bb3775465..4ca9ad16cd95 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -1199,6 +1199,7 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) unsigned long i; int ret = 0; + mutex_lock(&dpll_lock); xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED, ctx->idx) { if (!dpll_pin_available(pin)) @@ -1218,6 +1219,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } genlmsg_end(skb, hdr); } + mutex_unlock(&dpll_lock); + if (ret == -EMSGSIZE) { ctx->idx = i; return skb->len; @@ -1373,6 +1376,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) unsigned long i; int ret = 0; + mutex_lock(&dpll_lock); xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED, ctx->idx) { hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, @@ -1389,6 +1393,8 @@ int 
dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } genlmsg_end(skb, hdr); } + mutex_unlock(&dpll_lock); + if (ret == -EMSGSIZE) { ctx->idx = i; return skb->len; @@ -1439,20 +1445,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb, mutex_unlock(&dpll_lock); } -int dpll_lock_dumpit(struct netlink_callback *cb) -{ - mutex_lock(&dpll_lock); - - return 0; -} - -int dpll_unlock_dumpit(struct netlink_callback *cb) -{ - mutex_unlock(&dpll_lock); - - return 0; -} - int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c index eaee5be7aa64..1e95f5397cfc 100644 --- a/drivers/dpll/dpll_nl.c +++ b/drivers/dpll/dpll_nl.c @@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = { }, { .cmd = DPLL_CMD_DEVICE_GET, - .start = dpll_lock_dumpit, .dumpit = dpll_nl_device_get_dumpit, - .done = dpll_unlock_dumpit, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, }, { @@ -129,9 +127,7 @@ static const struct genl_split_ops dpll_nl_ops[] = { }, { .cmd = DPLL_CMD_PIN_GET, - .start = dpll_lock_dumpit, .dumpit = dpll_nl_pin_get_dumpit, - .done = dpll_unlock_dumpit, .policy = dpll_pin_get_dump_nl_policy, .maxattr = DPLL_A_PIN_ID, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h index 92d4c9c4f788..f491262bee4f 100644 --- a/drivers/dpll/dpll_nl.h +++ b/drivers/dpll/dpll_nl.h @@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, void dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info); -int dpll_lock_dumpit(struct netlink_callback *cb); -int dpll_unlock_dumpit(struct netlink_callback *cb); int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info); int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info); diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 6ac5ff20a2fe..8aaa7fcb2630 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work) */ card->bm_generation = generation; - if (root_device == NULL) { + if (card->gap_count == 0) { + /* + * If self IDs have inconsistent gap counts, do a + * bus reset ASAP. The config rom read might never + * complete, so don't wait for it. However, still + * send a PHY configuration packet prior to the + * bus reset. The PHY configuration packet might + * fail, but 1394-2008 8.4.5.2 explicitly permits + * it in this case, so it should be safe to try. + */ + new_root_id = local_id; + /* + * We must always send a bus reset if the gap count + * is inconsistent, so bypass the 5-reset limit. + */ + card->bm_retries = 0; + } else if (root_device == NULL) { /* * Either link_on is false, or we failed to read the * config rom. In either case, pick another root. 
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 83f5bb57fa4c..83092d93f36a 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void) efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 35c37f667781..9b3884ff81e6 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx, } } +struct ignore_section { + guid_t guid; + const char *name; +}; + +static const struct ignore_section ignore_sections[] = { + { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" }, + { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" }, + { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" }, +}; + static void cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata, int sec_no) @@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); snprintf(newpfx, sizeof(newpfx), "%s ", pfx); + + for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) { + if (guid_equal(sec_type, &ignore_sections[i].guid)) { + printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name); + return; + } + } + if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata); diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c index d4987d013080..a00e07b853f2 100644 --- a/drivers/firmware/efi/efi-init.c +++ b/drivers/firmware/efi/efi-init.c @@ -144,15 +144,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md) case EFI_CONVENTIONAL_MEMORY: case EFI_PERSISTENT_MEMORY: /* - * Special purpose memory is 'soft reserved', which means it - * is set aside initially, but can be hotplugged back in or - * be assigned to the dax driver after boot. - */ - if (efi_soft_reserve_enabled() && - (md->attribute & EFI_MEMORY_SP)) - return false; - - /* * According to the spec, these regions are no longer reserved * after calling ExitBootServices(). However, we can only use * them as System RAM if they can be mapped writeback cacheable. @@ -196,6 +187,16 @@ static __init void reserve_regions(void) size = npages << PAGE_SHIFT; if (is_memory(md)) { + /* + * Special purpose memory is 'soft reserved', which + * means it is set aside initially. Don't add a memblock + * for it now so that it can be hotplugged back in or + * be assigned to the dax driver after boot. 
+ */ + if (efi_soft_reserve_enabled() && + (md->attribute & EFI_MEMORY_SP)) + continue; + early_init_dt_add_memory_arch(paddr, size); if (!is_usable_memory(md)) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 06964a3c130f..73f4810f6db3 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP -fno-builtin -fpic \ $(call cc-option,-mno-single-pic-base) -cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE +cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax cflags-$(CONFIG_LOONGARCH) += -fpie cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt @@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS # exist. STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ -STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20 +STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX # For LoongArch, keep all the symbols in .init section and make sure that no # absolute symbols references exist. diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c index 6b83c492c3b8..31928bd87e0f 100644 --- a/drivers/firmware/efi/libstub/alignedmem.c +++ b/drivers/firmware/efi/libstub/alignedmem.c @@ -14,6 +14,7 @@ * @max: the address that the last allocated memory page shall not * exceed * @align: minimum alignment of the base of the allocation + * @memory_type: the type of memory to allocate * * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 212687c30d79..c04b82ea40f2 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed, - int memory_type, unsigned long alloc_limit); + int memory_type, unsigned long alloc_min, + unsigned long alloc_max); efi_status_t efi_random_get_seed(void); diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c index 62d63f7a2645..1a9808012abd 100644 --- a/drivers/firmware/efi/libstub/kaslr.c +++ b/drivers/firmware/efi/libstub/kaslr.c @@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr, */ status = efi_random_alloc(*reserve_size, min_kimg_align, reserve_addr, phys_seed, - EFI_LOADER_CODE, EFI_ALLOC_LIMIT); + EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) efi_warn("efi_random_alloc() failed: 0x%lx\n", status); } else { diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 674a064b8f7a..4e96a855fdf4 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -17,7 +17,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, unsigned long size, unsigned long align_shift, - u64 alloc_limit) + u64 alloc_min, u64 alloc_max) { unsigned long align = 1UL << align_shift; u64 first_slot, last_slot, region_end; @@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, return 0; region_end 
= min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, - alloc_limit); + alloc_max); if (region_end < size) return 0; - first_slot = round_up(md->phys_addr, align); + first_slot = round_up(max(md->phys_addr, alloc_min), align); last_slot = round_down(region_end - size + 1, align); if (first_slot > last_slot) @@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size, unsigned long *addr, unsigned long random_seed, int memory_type, - unsigned long alloc_limit) + unsigned long alloc_min, + unsigned long alloc_max) { unsigned long total_slots = 0, target_slot; unsigned long total_mirrored_slots = 0; @@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size, efi_memory_desc_t *md = (void *)map->map + map_offset; unsigned long slots; - slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit); + slots = get_entry_num_slots(md, size, ilog2(align), alloc_min, + alloc_max); MD_NUM_SLOTS(md) = slots; total_slots += slots; if (md->attribute & EFI_MEMORY_MORE_RELIABLE) diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 0d510c9a06a4..99429bc4b0c7 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } -void efi_adjust_memory_range_protection(unsigned long start, - unsigned long size) +efi_status_t efi_adjust_memory_range_protection(unsigned long start, + unsigned long size) { efi_status_t status; efi_gcd_memory_space_desc_t desc; @@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start, rounded_end = roundup(start + size, EFI_PAGE_SIZE); if (memattr != NULL) { - efi_call_proto(memattr, clear_memory_attributes, rounded_start, - rounded_end - rounded_start, EFI_MEMORY_XP); - return; + status = efi_call_proto(memattr, clear_memory_attributes, + rounded_start, + rounded_end - rounded_start, + EFI_MEMORY_XP); + if (status != EFI_SUCCESS) + efi_warn("Failed to clear EFI_MEMORY_XP attribute\n"); + return status; } if (efi_dxe_table == NULL) - return; + return EFI_SUCCESS; /* * Don't modify memory region attributes, they are @@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start, status = efi_dxe_call(get_memory_space_descriptor, start, &desc); if (status != EFI_SUCCESS) - return; + break; next = desc.base_address + desc.length; @@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start, unprotect_start, unprotect_start + unprotect_size, status); + break; } } + return EFI_SUCCESS; } static void setup_unaccepted_memory(void) @@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr, seed[0], EFI_LOADER_CODE, + LOAD_PHYSICAL_ADDR, EFI_X86_KERNEL_ALLOC_LIMIT); if (status != EFI_SUCCESS) return status; @@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) *kernel_entry = addr + entry; - efi_adjust_memory_range_protection(addr, kernel_total_size); - - return EFI_SUCCESS; + return efi_adjust_memory_range_protection(addr, kernel_total_size); } static void __noreturn enter_kernel(unsigned long kernel_addr, diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h index 37c5a36b9d8c..1c20e99a6494 100644 --- a/drivers/firmware/efi/libstub/x86-stub.h +++ b/drivers/firmware/efi/libstub/x86-stub.h @@ -5,8 +5,8 @@ extern void trampoline_32bit_src(void *, bool); 
extern const u16 trampoline_ljmp_imm_offset; -void efi_adjust_memory_range_protection(unsigned long start, - unsigned long size); +efi_status_t efi_adjust_memory_range_protection(unsigned long start, + unsigned long size); #ifdef CONFIG_X86_64 efi_status_t efi_setup_5level_paging(void); diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c index bdb17eac0cb4..1ceace956758 100644 --- a/drivers/firmware/efi/libstub/zboot.c +++ b/drivers/firmware/efi/libstub/zboot.c @@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) } status = efi_random_alloc(alloc_size, min_kimg_align, &image_base, - seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT); + seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) { efi_err("Failed to allocate memory\n"); goto free_cmdline; diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index 09525fb5c240..01f0f90ea418 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void) efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 44c8f5743a24..8b3a0f45b574 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1005,15 +1005,15 @@ err_remove_of_chip: err_free_gpiochip_mask: gpiochip_remove_pin_ranges(gc); gpiochip_free_valid_mask(gc); +err_remove_from_list: + spin_lock_irqsave(&gpio_lock, flags); + list_del(&gdev->list); + spin_unlock_irqrestore(&gpio_lock, flags); if (gdev->dev.release) { /* release() has been registered by gpiochip_setup_dev() */ gpio_device_put(gdev); goto err_print_message; } -err_remove_from_list: - spin_lock_irqsave(&gpio_lock, flags); - list_del(&gdev->list); - spin_unlock_irqrestore(&gpio_lock, flags); err_free_label: kfree_const(gdev->label); err_free_descs: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3d8a48f46b01..79827a6dcd7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; extern uint amdgpu_dm_abm_level; extern int amdgpu_backlight; +extern int amdgpu_damage_clips; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; @@ -1078,6 +1079,8 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; + /* indicate amdgpu suspension status */ + bool suspend_complete; enum pp_mp1_state mp1_state; struct amdgpu_doorbell_index doorbell_index; @@ -1547,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +void amdgpu_choose_low_power_state(struct amdgpu_device *adev); #else static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } +static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { } #endif #if defined(CONFIG_DRM_AMD_DC) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 
2deebece810e..cc21ed67a330 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1519,4 +1519,19 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) #endif /* CONFIG_AMD_PMC */ } +/** + * amdgpu_choose_low_power_state + * + * @adev: amdgpu_device_pointer + * + * Choose the target low power state for the GPU + */ +void amdgpu_choose_low_power_state(struct amdgpu_device *adev) +{ + if (amdgpu_acpi_is_s0ix_active(adev)) + adev->in_s0ix = true; + else if (amdgpu_acpi_is_s3_active(adev)) + adev->in_s3 = true; +} + #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fdde7488d0ed..94bdb5fa6ebc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4514,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); int i, r; + amdgpu_choose_low_power_state(adev); + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* Evict the majority of BOs before starting suspend sequence */ r = amdgpu_device_evict_resources(adev); if (r) - return r; + goto unprepare; for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) @@ -4529,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); if (r) - return r; + goto unprepare; } return 0; + +unprepare: + adev->in_s0ix = adev->in_s3 = false; + + return r; } /** @@ -4569,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); - flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 971acf01bea6..586f4d03039d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */ uint amdgpu_debug_mask; int amdgpu_agp = -1; /* auto */ int amdgpu_wbrf = -1; +int amdgpu_damage_clips = -1; /* auto */ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -860,6 +861,18 @@ MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (defau module_param_named(backlight, amdgpu_backlight, bint, 0444); /** + * DOC: damageclips (int) + * Enable or disable damage clips support. If damage clips support is disabled, + * we will force full frame updates, irrespective of what user space sends to + * us. + * + * Defaults to -1 (where it is enabled unless a PSR-SU display is detected). + */ +MODULE_PARM_DESC(damageclips, + "Damage clips support (0 = disable, 1 = enable, -1 auto (default))"); +module_param_named(damageclips, amdgpu_damage_clips, int, 0444); + +/** * DOC: tmz (int) * Trusted Memory Zone (TMZ) is a method to protect data being written * to or read from memory. 
@@ -2476,6 +2489,7 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2490,6 +2504,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b9674c57c436..6ddc8e3360e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -723,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + /* If going to s2idle, no need to wait */ + if (adev->in_s0ix) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, + AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } else { + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); + } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 468a67b302d4..ca5c86e5f7cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size } } - if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len)) + if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len)) ret = -EFAULT; err_free_shared_buf: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69c500910746..3bc6943365a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v9_0_cp_gfx_enable(adev, true); + /* Now only limit the quirk on the APU gfx9 series and already + * confirmed that the APU gfx10/gfx11 needn't such update. 
+ */ + if (adev->flags & AMD_IS_APU && + adev->in_s3 && !adev->suspend_complete) { + DRM_INFO(" Will skip the CSB packet resubmit\n"); + return 0; + } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 40a00ea0009f..e67a62db9e12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1947,14 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { - static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; - u32 vram_info; - - /* Only for dGPU, vendor informaton is reliable */ - if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) { - vram_info = RREG32(regBIF_BIOS_SCRATCH_4); - adev->gmc.vram_vendor = vram_info & 0xF; - } adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index bc38b90f8cf8..88ea58d5c4ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, @@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { - .set = jpeg_v4_0_set_interrupt_state, .process = jpeg_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 6ede85b28cc8..78b74daf4eeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); return 0; } @@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = { - .set = jpeg_v4_0_5_set_interrupt_state, .process = jpeg_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index e90f33780803..b4723d68eab0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) u32 inst_mask; int i; + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET( + NBIO, 0, + regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) + << 2; WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE, 0xff & ~(adev->gfx.xcc_mask)); diff --git 
a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 15033efec2ba..c64c01e2944a 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1298,10 +1298,32 @@ static int soc15_common_suspend(void *handle) return soc15_common_hw_fini(adev); } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Will reset for the following suspend abort cases. + * 1) Only reset limit on APU side, dGPU hasn't checked yet. + * 2) S3 suspend abort and TOS already launched. + */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc15_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); + soc15_asic_reset(adev); + } return soc15_common_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 48c6efcdeac9..4d7188912edf 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index d722cbd31783..826bc4f6c8a7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); if (has_wa_flag) { - uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? - 0xffff : 0xffffffff; + uint32_t wa_mask = + (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 
0xffff : 0xffffffff; m->compute_static_thread_mgmt_se0 = wa_mask; m->compute_static_thread_mgmt_se1 = wa_mask; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 42d881809dc7..697b6d530d12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, update_cu_mask(mm, mqd, minfo, 0); set_priority(m, q); + if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) { + if (minfo->update_flag & UPDATE_FLAG_IS_GWS) + m->compute_resource_limits |= + COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + else + m->compute_resource_limits &= + ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + } + q->is_active = QUEUE_IS_ACTIVE(*q); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 677281c0793e..80320b8603fc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -532,6 +532,7 @@ struct queue_properties { enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, UPDATE_FLAG_DBG_WA_DISABLE = 2, + UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */ }; struct mqd_update_info { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43eff221eae5..4858112f9a53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { + struct mqd_update_info minfo = {0}; struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; @@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; + minfo.update_flag = gws ? 
UPDATE_FLAG_IS_GWS : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q, NULL); + pqn->q, &minfo); } void kfd_process_dequeue_from_all_devices(struct kfd_process *p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e5f7c92eebcb..6ed2ec381aaa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, else mode = UNKNOWN_MEMORY_PARTITION_MODE; - if (pcache->cache_level == 2) - pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; - else if (mode) - pcache->cache_size = pcache_info[cache_type].cache_size / mode; - else - pcache->cache_size = pcache_info[cache_type].cache_size; + pcache->cache_size = pcache_info[cache_type].cache_size; + /* Partition mode only affects L3 cache size */ + if (mode && pcache->cache_level == 3) + pcache->cache_size /= mode; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d292f290cd6e..cf875751971f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1956,7 +1956,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -5219,6 +5219,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, struct dc_flip_addrs *flip_addrs, + bool is_psr_su, bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); @@ -5243,6 +5244,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, num_clips = drm_plane_get_damage_clips_count(new_plane_state); clips = drm_plane_get_damage_clips(new_plane_state); + if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && + is_psr_su))) + goto ffu; + if (!dm_crtc_state->mpo_requested) { if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) goto ffu; @@ -6194,7 +6199,9 @@ create_stream_for_sink(struct drm_connector *connector, if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); + saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; drm_mode_copy(&mode, freesync_mode); + mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); @@ -8298,6 +8305,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, &bundle->flip_addrs[planes_count], + acrtc_state->stream->link->psr_settings.psr_version == + DC_PSR_VERSION_SU_1, &dirty_rects_changed); /* @@ -10731,11 +10740,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); - if (ret) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; - goto fail; + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = 
compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index f2dfa96f9ef5..39530b2ea495 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -94,7 +94,7 @@ static void calculate_bandwidth( const uint32_t s_high = 7; const uint32_t dmif_chunk_buff_margin = 1; - uint32_t max_chunks_fbc_mode; + uint32_t max_chunks_fbc_mode = 0; int32_t num_cursor_lines; int32_t i, j, k; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 960c4b4f6ddf..05f392501c0a 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2( /* Vega12 */ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); if (!smu_info_v3_2) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; } else if (revision.minor == 3) { /* Vega20 */ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); if (!smu_info_v3_3) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; } @@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11( info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); if (info_v11 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v11->gpucapinfo); /* @@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1( info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); if (info_v2_1 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_1->gpucapinfo); /* @@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2( info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); - if (info_v2_2 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_2->gpucapinfo); /* diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index a5489fe6875f..aa9fd1dc550a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta int i; for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) { + if (i >= VG_NUM_DCFCLK_DPM_LEVELS) + break; if (clock_table->SocVoltage[i] == voltage) return clock_table->DcfClocks[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 14cec1c7b718..e64890259235 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -655,10 +655,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0; uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0; + uint32_t num_memps, num_fclk, num_dcfclk; int i; /* Determine min/max p-state values. */ - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { + num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS : + clock_table->NumMemPstatesEnabled; + for (i = 0; i < num_memps; i++) { uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) { @@ -670,7 +673,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk min_dram_speed_mts = max_dram_speed_mts; min_pstate = max_pstate; - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { + for (i = 0; i < num_memps; i++) { uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) { @@ -699,9 +702,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0); - max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled); + num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS : + clock_table->NumFclkLevelsEnabled; + max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk); - for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) { + num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS : + clock_table->NumDcfClkLevelsEnabled; + for (i = 0; i < num_dcfclk; i++) { int j; /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index aa7c02ba948e..2c424e435962 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3817,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc, * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). 
*/ - dc->hwss.apply_ctx_to_hw(dc, context); + + if (dc->hwss.disable_phantom_streams) + dc->hwss.disable_phantom_streams(dc, context); } if (update_type != UPDATE_TYPE_FAST) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 88c6436b28b6..180ac47868c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state) dc_stream_release(state->phantom_streams[i]); state->phantom_streams[i] = NULL; } + state->phantom_stream_count = 0; for (i = 0; i < state->phantom_plane_count; i++) { dc_plane_state_release(state->phantom_planes[i]); state->phantom_planes[i] = NULL; } + state->phantom_plane_count = 0; + state->stream_mask = 0; memset(&state->res_ctx, 0, sizeof(state->res_ctx)); memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg)); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index e43f77c11c00..5f97a868ada3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -56,16 +56,13 @@ static void dpp3_enable_cm_block( static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) { - enum dc_lut_mode mode; + enum dc_lut_mode mode = LUT_BYPASS; uint32_t state_mode; uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - if (state_mode == 0) - mode = LUT_BYPASS; - if (state_mode == 2) {//Programmable RAM LUT REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); if (lut_mode == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 6042a5a6a44f..59ade76ffb18 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index dd781a20692e..a0a65e099104 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex return updated; 
} -static bool should_allow_odm_power_optimization(struct dc *dc, +static bool should_apply_odm_power_optimization(struct dc *dc, struct dc_state *context, struct vba_vars_st *v, int *split, bool *merge) { @@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate( { int i; unsigned int new_vlevel; + unsigned int cur_policy[MAX_PIPES]; - for (i = 0; i < pipe_cnt; i++) + for (i = 0; i < pipe_cnt; i++) { + cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy; pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + } new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); @@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate( memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge); context->bw_ctx.dml.vba.VoltageLevel = *vlevel; + } else { + for (i = 0; i < pipe_cnt; i++) + pipes[i].pipe.dest.odm_combine_policy = cur_policy[i]; } } @@ -1580,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } - if (should_allow_odm_power_optimization(dc, context, vba, split, merge)) + if (should_apply_odm_power_optimization(dc, context, vba, split, merge)) try_odm_power_optimization_and_revalidate( dc, context, pipes, split, merge, vlevel, *pipe_cnt); @@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, int i; pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); + if (!dc->config.enable_windowed_mpo_odm) + dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); /* repopulate_pipes = 1 means the pipes were either split or merged. In this case * we have to re-calculate the DET allocation and run through DML once more to @@ -2753,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk struct _vcs_dpi_voltage_scaling_st entry = {0}; struct clk_limit_table_entry max_clk_data = {0}; - unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599; + unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; static const unsigned int num_dcfclk_stas = 5; unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 2352428bcea3..01493c49bd7a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static enum dc_status apply_single_controller_ctx_to_hw( +enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -2302,7 +2302,7 @@ enum dc_status dce110_apply_ctx_to_hw( if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) continue; - status = apply_single_controller_ctx_to_hw( + status = dce110_apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 08028a1779ae..ed3cc3648e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_single_controller_ctx_to_hw( + 
struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); void dce110_enable_stream(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 4853ecac53f9..931ac8ed7069 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2561,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn20_reset_back_end_for_pipe( +void dcn20_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index b94c85340abf..d950b3e54ec2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing( void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 8e88dcaf88f5..7252f5f781f0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return; + + otg_inst = tg->inst; if (dmcu) { dce110_set_pipe(pipe_ctx); return; } - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, panel_cntl->pwrseq_inst); - } else { - dmub_abm_set_pipe(abm, otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } + } else { + dmub_abm_set_pipe(abm, otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } } @@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); return true; } - if (abm != NULL) { - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; - - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, - otg_inst, - 
SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } else { - dmub_abm_set_pipe(abm, - otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } - } + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + } else { + dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } - if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) + if (abm->funcs && abm->funcs->set_backlight_level_pwm) abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, frame_ramp, 0, panel_cntl->inst); else diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 6c9299c7683d..aa36d7a56ca8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc, } } +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) { unsigned int i; + enum dc_status status = DC_OK; + struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) } } for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - - if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. 
- dc->hwss.apply_ctx_to_hw(dc, context); - break; + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx->stream == pipe_ctx_old->stream && + pipe_ctx->stream->link->link_state_valid) { + continue; } + + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) + continue; + + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) + continue; + + if (hws->funcs.apply_single_controller_ctx_to_hw) + status = hws->funcs.apply_single_controller_ctx_to_hw( + pipe_ctx, + context, + dc); + + ASSERT(status == DC_OK); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cecf7f0f5671..069e20bc87c0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc, void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); + void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 427cfc8c24a4..e8ac94a005b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, + .disable_phantom_streams = dcn32_disable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, @@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, }; void dcn32_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index a54399383318..64ca7c66509b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -379,6 +379,7 @@ struct hw_sequencer_funcs { struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 6137cf09aa54..b3c62a82cb1c 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -165,8 +165,15 @@ struct hwseq_private_funcs { void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + enum dc_status (*apply_single_controller_ctx_to_hw)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); #endif + void (*reset_back_end_for_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index c958ef37b78a..77a60aa9f27b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe); int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe); /* - * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice - * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for a plane with MPCC combine. otherwise - * the number of MPC "cuts" for the plane. + * Get the number of MPC slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an MPC combine + * pipe topology. */ -int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head); +int resource_get_mpc_slice_count(const struct pipe_ctx *pipe); /* - * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice - * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for ODM combine. otherwise - * the number of ODM "cuts" for the timing. + * Get the number of ODM slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an ODM combine + * pipe topology. 
*/ -int resource_get_odm_slice_count(const struct pipe_ctx *otg_master); +int resource_get_odm_slice_count(const struct pipe_ctx *pipe); /* Get the ODM slice index counting from 0 from left most slice */ int resource_get_odm_slice_index(const struct pipe_ctx *opp_head); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 8fe66c367850..5b0bc7f6a188 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -361,7 +361,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; int num_dpias = 0; - for (uint8_t i = 0; i < num_streams; ++i) { + for (unsigned int i = 0; i < num_streams; ++i) { if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { /* new dpia sst stream, check whether it exceeds max dpia */ if (num_dpias >= MAX_DPIA_NUM) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 5a0b04518956..16a62e018712 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status( { enum link_training_result status = LINK_TRAINING_SUCCESS; union lane_status lane_status; + union lane_align_status_updated dpcd_lane_status_updated; uint8_t dpcd_buf[6] = {0}; uint32_t lane; @@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status( * check lanes status */ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + dpcd_lane_status_updated.raw = dpcd_buf[4]; if (!lane_status.bits.CHANNEL_EQ_DONE_0 || !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { + !lane_status.bits.SYMBOL_LOCKED_0 || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) { /* if one of the channel equalization, clock * recovery or symbol lock is dropped * consider it as (link has been diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index e8dda44b23cb..5d36bab0029c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent( uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern; - uint32_t wait_time_microsec; + uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 511ff6b5b985..7538b548c572 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) { + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { kfree(enc1); kfree(vpg); kfree(afmt); diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index c4d71e7f18af..6f10052caeef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context( dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + if (dc->config.enable_windowed_mpo_odm && + dc->debug.enable_single_display_2to1_odm_policy) { + switch (resource_get_odm_slice_count(pipe)) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + } else { + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 761ec9891875..5fdcda8f8602 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400, + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, .static_screen_wait_frames = 2, }; @@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct( dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index f57e6d74fb0e..c1a99bf4dffd 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -539,6 +539,12 @@ static int __alloc_range(struct drm_buddy *mm, } while (1); list_splice_tail(&allocated, blocks); + + if (total_allocated < size) { + err = -ENOSPC; + goto err_free; + } + return 0; err_undo: diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index cb90e70d85e8..65f9f66933bb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -904,6 +904,7 @@ out: connector_set = NULL; fb = NULL; mode = NULL; + num_connectors = 0; DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 834a5e28abbe..7352bde299d5 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, if (max_segment == 0) max_segment = UINT_MAX; err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, + (unsigned long)nr_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (err) { kfree(sg); 
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index b5d6e3352071..3089029abba4 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT Note that this driver only supports newer device from Broadwell on. For further information and setup guide, you can visit: - http://01.org/igvt-g. + https://github.com/intel/gvt-linux/wiki. If in doubt, say "N". diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f5ef95da5534..ae647d03af25 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2355,6 +2355,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_rate = intel_dp_common_rate(intel_dp, 0); limits->max_rate = intel_dp_max_link_rate(intel_dp); + /* FIXME 128b/132b SST support missing */ + limits->max_rate = min(limits->max_rate, 810000); + limits->min_lane_count = 1; limits->max_lane_count = intel_dp_max_lane_count(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h index 64f440fdc22b..8b21dc8e26d5 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -51,8 +51,8 @@ #define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) #define _DSCA_PPS_0 0x6B200 #define _DSCC_PPS_0 0x6BA00 -#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4) -#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4) +#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4) +#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4) #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 90f6c1ece57d..efcb00472be2 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, for (i = start; i < end; i += 4) { p = intel_gvt_find_mmio_info(gvt, i); if (p) { - WARN(1, "dup mmio definition offset %x\n", - info->offset); + WARN(1, "dup mmio definition offset %x\n", i); /* We return -EEXIST here to make GVT-g load fail. * So duplicated MMIO can be found as soon as diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index e98b6d69a91a..9b6d87c8b583 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -41,7 +41,7 @@ * To virtualize GPU resources GVT-g driver depends on hypervisor technology * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability * and be virtualized within GVT-g device module. More architectural design - * doc is available on https://01.org/group/2230/documentation-list. + * doc is available on https://github.com/intel/gvt-linux/wiki. 
*/ static LIST_HEAD(intel_gvt_devices); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index c0bc924cd302..c9c55e2ea584 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.highest_bank_bit = 15; if (adreno_is_a610(gpu)) { - gpu->ubwc_config.highest_bank_bit = 14; + gpu->ubwc_config.highest_bank_bit = 13; gpu->ubwc_config.min_acc_len = 1; gpu->ubwc_config.ubwc_mode = 1; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 83380bc92a00..6a4b489d44e5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -144,10 +144,6 @@ enum dpu_enc_rc_states { * to track crtc in the disable() hook which is called * _after_ encoder_mask is cleared. * @connector: If a mode is set, cached pointer to the active connector - * @crtc_kickoff_cb: Callback into CRTC that will flush & start - * all CTL paths - * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb - * @debugfs_root: Debug file system root file node * @enc_lock: Lock around physical encoder * create/destroy/enable/disable * @frame_busy_mask: Bitmask tracking which phys_enc we are still @@ -2072,7 +2068,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) } /* reset the merge 3D HW block */ - if (phys_enc->hw_pp->merge_3d) { + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, BLEND_3D_NONE); if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) @@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) if (phys_enc->hw_wb) intf_cfg.wb = phys_enc->hw_wb->idx; - if (phys_enc->hw_pp->merge_3d) + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; if (ctl->ops.reset_intf_cfg) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index b58a9c2ae326..724537ab776d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -29,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx, /** * struct dpu_rm_requirements - Reservation requirements parameter bundle * @topology: selected topology for the display - * @hw_res: Hardware resources required as reported by the encoders */ struct dpu_rm_requirements { struct msm_display_topology topology; @@ -204,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary * @rm: dpu resource manager handle * @primary_idx: index of primary mixer in rm->mixer_blks[] + * + * Returns: lm peer mixed id on success or %-EINVAL on error */ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) { diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 77a8d9366ed7..fb588fde298a 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) tbd = dp_link_get_test_bits_depth(ctrl->link, ctrl->panel->dp_mode.bpp); - if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) { - pr_debug("BIT_DEPTH not set. 
Configure default\n"); - tbd = DP_TEST_BIT_DEPTH_8; - } - config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; /* Num of Lanes */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 98427d45e9a7..49dfac1fd1ef 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -7,6 +7,7 @@ #include <drm/drm_print.h> +#include "dp_reg.h" #include "dp_link.h" #include "dp_panel.h" @@ -1082,7 +1083,7 @@ int dp_link_process_request(struct dp_link *dp_link) int dp_link_get_colorimetry_config(struct dp_link *dp_link) { - u32 cc; + u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB; struct dp_link_private *link; if (!dp_link) { @@ -1096,10 +1097,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link) * Unless a video pattern CTS test is ongoing, use RGB_VESA * Only RGB_VESA and RGB_CEA supported for now */ - if (dp_link_is_video_pattern_requested(link)) - cc = link->dp_link.test_video.test_dyn_range; - else - cc = DP_TEST_DYNAMIC_RANGE_VESA; + if (dp_link_is_video_pattern_requested(link)) { + if (link->dp_link.test_video.test_dyn_range & + DP_TEST_DYNAMIC_RANGE_CEA) + cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB; + } return cc; } @@ -1179,6 +1181,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) { u32 tbd; + struct dp_link_private *link; + + link = container_of(dp_link, struct dp_link_private, dp_link); /* * Few simplistic rules and assumptions made here: @@ -1196,12 +1201,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) tbd = DP_TEST_BIT_DEPTH_10; break; default: - tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n", + bpp); + tbd = DP_TEST_BIT_DEPTH_8; break; } - if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) - tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); return tbd; } diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h index ea85a691e72b..78785ed4b40c 100644 --- a/drivers/gpu/drm/msm/dp/dp_reg.h +++ b/drivers/gpu/drm/msm/dp/dp_reg.h @@ -143,6 +143,9 @@ #define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001) #define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005) +#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0) +#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04) + #define REG_DP_VALID_BOUNDARY (0x00000030) #define REG_DP_VALID_BOUNDARY_2 (0x00000034) diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 5f68e31a3e4e..0915f3b68752 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { void *vaddr; - vaddr = msm_gem_get_vaddr(obj); + vaddr = msm_gem_get_vaddr_locked(obj); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); iosys_map_set_vaddr(map, vaddr); @@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { - msm_gem_put_vaddr(obj); + msm_gem_put_vaddr_locked(obj); } struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 095390774f22..655002b21b0d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned long flags; - 
pm_runtime_get_sync(&gpu->pdev->dev); + WARN_ON(!mutex_is_locked(&gpu->lock)); - mutex_lock(&gpu->lock); + pm_runtime_get_sync(&gpu->pdev->dev); msm_gpu_hw_init(gpu); + submit->seqno = submit->hw_fence->seqno; + update_sw_cntrs(gpu); /* @@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) gpu->funcs->submit(gpu, submit); gpu->cur_ctx_seqno = submit->queue->ctx->seqno; - hangcheck_timer_reset(gpu); - - mutex_unlock(&gpu->lock); - pm_runtime_put(&gpu->pdev->dev); + hangcheck_timer_reset(gpu); } /* diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 5cc8d358cc97..d5512037c38b 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -21,6 +21,8 @@ struct msm_iommu_pagetable { struct msm_mmu base; struct msm_mmu *parent; struct io_pgtable_ops *pgtbl_ops; + const struct iommu_flush_ops *tlb; + struct device *iommu_dev; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ phys_addr_t ttbr; u32 asid; @@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = { static void msm_iommu_tlb_flush_all(void *cookie) { + struct msm_iommu_pagetable *pagetable = cookie; + struct adreno_smmu_priv *adreno_smmu; + + if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) + return; + + adreno_smmu = dev_get_drvdata(pagetable->parent->dev); + + pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie); + + pm_runtime_put_autosuspend(pagetable->iommu_dev); } static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct msm_iommu_pagetable *pagetable = cookie; + struct adreno_smmu_priv *adreno_smmu; + + if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) + return; + + adreno_smmu = dev_get_drvdata(pagetable->parent->dev); + + pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); + + pm_runtime_put_autosuspend(pagetable->iommu_dev); } static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, @@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, { } -static const struct iommu_flush_ops null_tlb_ops = { +static const struct iommu_flush_ops tlb_ops = { .tlb_flush_all = msm_iommu_tlb_flush_all, .tlb_flush_walk = msm_iommu_tlb_flush_walk, .tlb_add_page = msm_iommu_tlb_add_page, @@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* The incoming cfg will have the TTBR1 quirk enabled */ ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; - ttbr0_cfg.tlb = &null_tlb_ops; + ttbr0_cfg.tlb = &tlb_ops; pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, - &ttbr0_cfg, iommu->domain); + &ttbr0_cfg, pagetable); if (!pagetable->pgtbl_ops) { kfree(pagetable); @@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* Needed later for TLB flush */ pagetable->parent = parent; + pagetable->tlb = ttbr1_cfg->tlb; + pagetable->iommu_dev = ttbr1_cfg->iommu_dev; pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index 455b2e3a0cdd..35423d10aafa 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -562,6 +562,7 @@ static const struct msm_mdss_data sdm670_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version = UBWC_2_0, .highest_bank_bit = 1, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sdm845_data = { diff --git 
a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 4bc13f7d005a..9d6655f96f0c 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) msm_fence_init(submit->hw_fence, fctx); - submit->seqno = submit->hw_fence->seqno; - mutex_lock(&priv->lru.lock); for (i = 0; i < submit->nr_bos; i++) { @@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) mutex_unlock(&priv->lru.lock); + /* TODO move submit path over to using a per-ring lock.. */ + mutex_lock(&gpu->lock); + msm_gpu_submit(gpu, submit); + mutex_unlock(&gpu->lock); + return dma_fence_get(submit->hw_fence); } diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index d1437c08645f..6f5d376d8fcc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -9,7 +9,7 @@ #define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT) struct nvkm_gsp_mem { - u32 size; + size_t size; void *data; dma_addr_t addr; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index a04156ca8390..d1bb8151a1df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, struct nouveau_abi16_ntfy *ntfy, *temp; /* Cancel all jobs from the entity's queue. */ - drm_sched_entity_fini(&chan->sched.entity); + if (chan->sched) + drm_sched_entity_fini(&chan->sched->entity); if (chan->chan) nouveau_channel_idle(chan->chan); - nouveau_sched_fini(&chan->sched); + if (chan->sched) + nouveau_sched_destroy(&chan->sched); /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { @@ -337,10 +339,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq, - chan->chan->dma.ib_max); - if (ret) - goto done; + /* If we're not using the VM_BIND uAPI, we don't need a scheduler. + * + * The client lock is already acquired by nouveau_abi16_get(). 
+ */ + if (nouveau_cli_uvmm(cli)) { + ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq, + chan->chan->dma.ib_max); + if (ret) + goto done; + } init->channel = chan->chan->chid; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 1f5e243c0c75..11c8c4a80079 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -26,7 +26,7 @@ struct nouveau_abi16_chan { struct nouveau_bo *ntfy; struct nouveau_vma *ntfy_vma; struct nvkm_mm heap; - struct nouveau_sched sched; + struct nouveau_sched *sched; }; struct nouveau_abi16 { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 6f6c31a9937b..a947e1d5f309 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); - nouveau_sched_fini(&cli->sched); + if (cli->sched) + nouveau_sched_destroy(&cli->sched); if (uvmm) nouveau_uvmm_fini(uvmm); nouveau_vmm_fini(&cli->svm); @@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, cli->mem = &mems[ret]; /* Don't pass in the (shared) sched_wq in order to let - * nouveau_sched_init() create a dedicated one for VM_BIND jobs. + * nouveau_sched_create() create a dedicated one for VM_BIND jobs. * * This is required to ensure that for VM_BIND jobs free_job() work and * run_job() work can always run concurrently and hence, free_job() work @@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, * locks which indirectly or directly are held for allocations * elsewhere. */ - ret = nouveau_sched_init(&cli->sched, drm, NULL, 1); + ret = nouveau_sched_create(&cli->sched, drm, NULL, 1); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8a6d94c8b163..e239c6bf4afa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -98,7 +98,7 @@ struct nouveau_cli { bool disabled; } uvmm; - struct nouveau_sched sched; + struct nouveau_sched *sched; const struct nvif_mclass *mem; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index bc5d71b79ab2..e65c0ef23bc7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (ret) goto out; - args.sched = &chan16->sched; + args.sched = chan16->sched; args.file_priv = file_priv; args.chan = chan; diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index dd98f6910f9c..32fa2e273965 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = { .free_job = nouveau_sched_free_job, }; -int +static int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, struct workqueue_struct *wq, u32 credit_limit) { @@ -453,7 +453,30 @@ fail_wq: return ret; } -void +int +nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit) +{ + struct nouveau_sched *sched; + int ret; + + sched = kzalloc(sizeof(*sched), GFP_KERNEL); + if (!sched) + return -ENOMEM; + + ret = nouveau_sched_init(sched, drm, wq, credit_limit); + if (ret) { + kfree(sched); + return ret; + } + + *psched 
= sched; + + return 0; +} + + +static void nouveau_sched_fini(struct nouveau_sched *sched) { struct drm_gpu_scheduler *drm_sched = &sched->base; @@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched) if (sched->wq) destroy_workqueue(sched->wq); } + +void +nouveau_sched_destroy(struct nouveau_sched **psched) +{ + struct nouveau_sched *sched = *psched; + + nouveau_sched_fini(sched); + kfree(sched); + + *psched = NULL; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index a6528f5981e6..e1f01a23e6f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -111,8 +111,8 @@ struct nouveau_sched { } job; }; -int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, - struct workqueue_struct *wq, u32 credit_limit); -void nouveau_sched_fini(struct nouveau_sched *sched); +int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit); +void nouveau_sched_destroy(struct nouveau_sched **psched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index cc03e0c22ff3..5e4565c5011a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id) if (ret) return ret; - buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL); + buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL); if (!buffer->fault) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 4f223c972c6a..0a0a11dc9ec0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, if (ret) return ret; - args.sched = &cli->sched; + args.sched = cli->sched; args.file_priv = file_priv; ret = nouveau_uvmm_vm_bind(&args); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 5e1fa176aac4..a41735ab6068 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) return 0; } +static void +nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) +{ + if (mem->data) { + /* + * Poison the buffer to catch any unexpected access from + * GSP-RM if the buffer was prematurely freed. 
+ */ + memset(mem->data, 0xFF, mem->size); + + dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); + memset(mem, 0, sizeof(*mem)); + } +} + +static int +nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) +{ + mem->size = size; + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); + if (WARN_ON(!mem->data)) + return -ENOMEM; + + return 0; +} + static int r535_gsp_postinit(struct nvkm_gsp *gsp) { @@ -1024,6 +1050,13 @@ r535_gsp_postinit(struct nvkm_gsp *gsp) nvkm_inth_allow(&gsp->subdev.inth); nvkm_wr32(device, 0x110004, 0x00000040); + + /* Release the DMA buffers that were needed only for boot and init */ + nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); + nvkm_gsp_mem_dtor(gsp, &gsp->libos); + nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); + nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); + return ret; } @@ -1532,27 +1565,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) return 0; } -static void -nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) -{ - if (mem->data) { - dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); - mem->data = NULL; - } -} - -static int -nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem) -{ - mem->size = size; - mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); - if (WARN_ON(!mem->data)) - return -ENOMEM; - - return 0; -} - - static int r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) { @@ -1938,20 +1950,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) * See kgspCreateRadix3_IMPL */ static int -nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size, +nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, struct nvkm_gsp_radix3 *rx3) { u64 addr; for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { u64 *ptes; - int idx; + size_t bufsize; + int ret, idx; - rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); - rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size, - &rx3->mem[i].addr, GFP_KERNEL); - if (WARN_ON(!rx3->mem[i].data)) - return -ENOMEM; + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); + if (ret) + return ret; ptes = rx3->mem[i].data; if (i == 2) { @@ -1991,7 +2003,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) if (ret) return ret; - ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3); + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); if (ret) return ret; @@ -2150,6 +2162,11 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) mutex_destroy(&gsp->cmdq.mutex); r535_gsp_dtor_fws(gsp); + + nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem); + nvkm_gsp_mem_dtor(gsp, &gsp->loginit); + nvkm_gsp_mem_dtor(gsp, &gsp->logintr); + nvkm_gsp_mem_dtor(gsp, &gsp->logrm); } int @@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) memcpy(gsp->sig.data, data, size); /* Build radix3 page table for ELF image. 
*/ - ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); if (ret) return ret; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 85b3b4871a1d..fdd768bbd487 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); } - if (!clock) + if (!clock) { + vop2_unlock(vop2); return; + } if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA && !(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT)) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 85f082396d42..d442b893275b 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1178,21 +1178,24 @@ static void drm_sched_run_job_work(struct work_struct *w) struct drm_sched_entity *entity; struct dma_fence *fence; struct drm_sched_fence *s_fence; - struct drm_sched_job *sched_job = NULL; + struct drm_sched_job *sched_job; int r; if (READ_ONCE(sched->pause_submit)) return; /* Find entity with a ready job */ - while (!sched_job && (entity = drm_sched_select_entity(sched))) { - sched_job = drm_sched_entity_pop_job(entity); - if (!sched_job) - complete_all(&entity->entity_idle); - } + entity = drm_sched_select_entity(sched); if (!entity) return; /* No more work */ + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) { + complete_all(&entity->entity_idle); + drm_sched_run_job_queue(sched); + return; + } + s_fence = sched_job->s_fence; atomic_add(sched_job->credits, &sched->credit_count); diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index ea2af6bd9abe..fee6bec757d1 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -8,6 +8,7 @@ #include <linux/prime_numbers.h> #include <linux/sched/signal.h> +#include <linux/sizes.h> #include <drm/drm_buddy.h> @@ -18,6 +19,93 @@ static inline u64 get_size(int order, u64 chunk_size) return (1 << order) * chunk_size; } +static void drm_test_buddy_alloc_contiguous(struct kunit *test) +{ + u64 mm_size, ps = SZ_4K, i, n_pages, total; + struct drm_buddy_block *block; + struct drm_buddy mm; + LIST_HEAD(left); + LIST_HEAD(middle); + LIST_HEAD(right); + LIST_HEAD(allocated); + + mm_size = 16 * 3 * SZ_4K; + + KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + + /* + * Idea is to fragment the address space by alternating block + * allocations between three different lists; one for left, middle and + * right. We can then free a list to simulate fragmentation. In + * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION, + * including the try_harder path. 
+ */ + + i = 0; + n_pages = mm_size / ps; + do { + struct list_head *list; + int slot = i % 3; + + if (slot == 0) + list = &left; + else if (slot == 1) + list = &middle; + else + list = &right; + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, 0, mm_size, + ps, ps, list, 0), + "buddy_alloc hit an error size=%d\n", + ps); + } while (++i < n_pages); + + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%d\n", 3 * ps); + + drm_buddy_free_list(&mm, &middle); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%llu\n", 3 * ps); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 2 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%llu\n", 2 * ps); + + drm_buddy_free_list(&mm, &right); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%llu\n", 3 * ps); + /* + * At this point we should have enough contiguous space for 2 blocks, + * however they are never buddies (since we freed middle and right) so + * will require the try_harder logic to find them. + */ + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 2 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc hit an error size=%d\n", 2 * ps); + + drm_buddy_free_list(&mm, &left); + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc hit an error size=%d\n", 3 * ps); + + total = 0; + list_for_each_entry(block, &allocated, link) + total += drm_buddy_block_size(&mm, block); + + KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3); + + drm_buddy_free_list(&mm, &allocated); + drm_buddy_fini(&mm); +} + static void drm_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start = 0; @@ -280,6 +368,7 @@ static struct kunit_case drm_buddy_tests[] = { KUNIT_CASE(drm_test_buddy_alloc_optimistic), KUNIT_CASE(drm_test_buddy_alloc_pessimistic), KUNIT_CASE(drm_test_buddy_alloc_pathological), + KUNIT_CASE(drm_test_buddy_alloc_contiguous), {} }; diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h index 68d9f6116bdf..777c20ceabab 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h @@ -10,7 +10,7 @@ #include "xe_bo.h" -#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT) +#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */ static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n) { diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c index 74391d9b11ae..e4db069f0db3 100644 --- a/drivers/gpu/drm/xe/xe_display.c +++ b/drivers/gpu/drm/xe/xe_display.c @@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy) int xe_display_init_nommio(struct xe_device *xe) { - int err; - if (!xe->info.enable_display) return 0; @@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe) /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(xe); - err = intel_power_domains_init(xe); - if (err) - return err; - 
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe); } diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index bcfc4127c7c5..254b1d3af4cb 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -926,20 +926,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) * @q: The exec queue * @vm: The VM the engine does a bind or exec for * - * Get last fence, does not take a ref + * Get last fence, takes a ref * * Returns: last fence if not signaled, dma fence stub if signaled */ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, struct xe_vm *vm) { + struct dma_fence *fence; + xe_exec_queue_last_fence_lockdep_assert(q, vm); if (q->last_fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) xe_exec_queue_last_fence_put(q, vm); - return q->last_fence ? q->last_fence : dma_fence_get_stub(); + fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); + dma_fence_get(fence); + return fence; } /** diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3af2adec1295..35474ddbaf97 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt) * USM has its only SA pool to non-block behind user operations */ if (gt_to_xe(gt)->info.has_usm) { - gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16); + struct xe_device *xe = gt_to_xe(gt); + + gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), + IS_DGFX(xe) ? SZ_1M : SZ_512K, 16); if (IS_ERR(gt->usm.bb_pool)) { err = PTR_ERR(gt->usm.bb_pool); goto err_force_wake; diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 9c2fe1697d6e..73f08f1924df 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -335,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) return -EPROTO; asid = FIELD_GET(PFD_ASID, msg[1]); - pf_queue = >->usm.pf_queue[asid % NUM_PF_QUEUE]; + pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE); spin_lock_irqsave(&pf_queue->lock, flags); full = pf_queue_full(pf_queue); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 5c6c54624252..70480c305602 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (!IS_DGFX(xe)) { /* Write out batch too */ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; - if (xe->info.has_usm) { - batch = tile->primary_gt->usm.bb_pool->bo; - m->usm_batch_base_ofs = m->batch_base_ofs; - } - for (i = 0; i < batch->size; i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : XE_PAGE_SIZE) { @@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, entry); level++; } + if (xe->info.has_usm) { + xe_tile_assert(tile, batch->size == SZ_1M); + + batch = tile->primary_gt->usm.bb_pool->bo; + m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; + xe_tile_assert(tile, batch->size == SZ_512K); + + for (i = 0; i < batch->size; + i += vm->flags & XE_VM_FLAG_64K ? 
XE_64K_PAGE_SIZE : + XE_PAGE_SIZE) { + entry = vm->pt_ops->pte_encode_bo(batch, i, + pat_index, 0); + + xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, + entry); + level++; + } + } } else { u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); @@ -1204,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, } if (q) { fence = xe_exec_queue_last_fence_get(q, vm); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + dma_fence_put(fence); return false; + } + dma_fence_put(fence); } return true; diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index e45b37c3f0c2..ac19bfa3f798 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -20,8 +20,8 @@ struct xe_pt_dir { struct xe_pt pt; - /** @dir: Directory structure for the xe_pt_walk functionality */ - struct xe_ptw_dir dir; + /** @children: Array of page-table child nodes */ + struct xe_ptw *children[XE_PDES]; }; #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) @@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt) static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index) { - return container_of(pt_dir->dir.entries[index], struct xe_pt, base); + return container_of(pt_dir->children[index], struct xe_pt, base); } static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, @@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, XE_PTE_NULL; } +static void xe_pt_free(struct xe_pt *pt) +{ + if (pt->level) + kfree(as_xe_pt_dir(pt)); + else + kfree(pt); +} + /** * xe_pt_create() - Create a page-table. * @vm: The vm to create for. @@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, { struct xe_pt *pt; struct xe_bo *bo; - size_t size; int err; - size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) + - XE_PDES * sizeof(struct xe_ptw *); - pt = kzalloc(size, GFP_KERNEL); + if (level) { + struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL); + + pt = (dir) ? &dir->pt : NULL; + } else { + pt = kzalloc(sizeof(*pt), GFP_KERNEL); + } if (!pt) return ERR_PTR(-ENOMEM); + pt->level = level; bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K, ttm_bo_type_kernel, XE_BO_CREATE_VRAM_IF_DGFX(tile) | @@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, goto err_kfree; } pt->bo = bo; - pt->level = level; - pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL; + pt->base.children = level ? 
as_xe_pt_dir(pt)->children : NULL; if (vm->xef) xe_drm_client_add_bo(vm->xef->client, pt->bo); @@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, return pt; err_kfree: - kfree(pt); + xe_pt_free(pt); return ERR_PTR(err); } @@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) deferred); } } - kfree(pt); + xe_pt_free(pt); } /** @@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, struct iosys_map *map = &parent->bo->vmap; if (unlikely(xe_child)) - parent->base.dir->entries[offset] = &xe_child->base; + parent->base.children[offset] = &xe_child->base; xe_pt_write(xe_walk->vm->xe, map, offset, pte); parent->num_live++; @@ -853,7 +864,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma, xe_pt_destroy(xe_pt_entry(pt_dir, j_), xe_vma_vm(vma)->flags, deferred); - pt_dir->dir.entries[j_] = &newpte->base; + pt_dir->children[j_] = &newpte->base; } kfree(entries[i].pt_entries); } @@ -1507,7 +1518,7 @@ xe_pt_commit_unbind(struct xe_vma *vma, xe_pt_destroy(xe_pt_entry(pt_dir, i), xe_vma_vm(vma)->flags, deferred); - pt_dir->dir.entries[i] = NULL; + pt_dir->children[i] = NULL; } } } diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c index 8f6c8d063f39..b8b3d2aea492 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.c +++ b/drivers/gpu/drm/xe/xe_pt_walk.c @@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level, u64 addr, u64 end, struct xe_pt_walk *walk) { pgoff_t offset = xe_pt_offset(addr, level, walk); - struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL; + struct xe_ptw **entries = parent->children ? parent->children : NULL; const struct xe_pt_walk_ops *ops = walk->ops; enum page_walk_action action; struct xe_ptw *child; diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h index ec3d1e9efa6d..5ecc4d2f0f65 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.h +++ b/drivers/gpu/drm/xe/xe_pt_walk.h @@ -8,28 +8,15 @@ #include <linux/pagewalk.h> #include <linux/types.h> -struct xe_ptw_dir; - /** * struct xe_ptw - base class for driver pagetable subclassing. - * @dir: Pointer to an array of children if any. + * @children: Pointer to an array of children if any. * * Drivers could subclass this, and if it's a page-directory, typically - * embed the xe_ptw_dir::entries array in the same allocation. + * embed an array of xe_ptw pointers. */ struct xe_ptw { - struct xe_ptw_dir *dir; -}; - -/** - * struct xe_ptw_dir - page directory structure - * @entries: Array holding page directory children. - * - * It is the responsibility of the user to ensure @entries is - * correctly sized. 
- */ -struct xe_ptw_dir { - struct xe_ptw *entries[0]; + struct xe_ptw **children; }; /** diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c index d35d9ec58e86..372378e89e98 100644 --- a/drivers/gpu/drm/xe/xe_range_fence.c +++ b/drivers/gpu/drm/xe/xe_range_fence.c @@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last) return xe_range_fence_tree_iter_next(rfence, start, last); } +static void xe_range_fence_free(struct xe_range_fence *rfence) +{ + kfree(rfence); +} + const struct xe_range_fence_ops xe_range_fence_kfree_ops = { - .free = (void (*)(struct xe_range_fence *rfence)) kfree, + .free = xe_range_fence_free, }; diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index 01106a1156ad..4e2ccad0e52f 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) struct dma_fence *fence; fence = xe_exec_queue_last_fence_get(job->q, vm); - dma_fence_get(fence); return drm_sched_job_add_dependency(&job->drm, fence); } diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index e4c220cf9115..aab92bee1d7c 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -307,7 +307,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, /* Easy case... */ if (!num_in_fence) { fence = xe_exec_queue_last_fence_get(q, vm); - dma_fence_get(fence); return fence; } @@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, } } fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); - dma_fence_get(fences[current_fence - 1]); cf = dma_fence_array_create(num_in_fence, fences, vm->composite_fence_ctx, vm->composite_fence_seqno++, diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 30db264d34a3..7b00faa67287 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -37,8 +37,6 @@ #include "generated/xe_wa_oob.h" #include "xe_wa.h" -#define TEST_VM_ASYNC_OPS_ERROR - static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) { return vm->gpuvm.r_obj; @@ -114,11 +112,8 @@ retry: num_pages - pinned, read_only ? 0 : FOLL_WRITE, &pages[pinned]); - if (ret < 0) { - if (in_kthread) - ret = 0; + if (ret < 0) break; - } pinned += ret; ret = 0; @@ -1000,9 +995,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma, int err; XE_WARN_ON(!vm); - err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); - if (!err && bo && !bo->vm) - err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); + if (num_shared) + err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); + else + err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); + if (!err && bo && !bo->vm) { + if (num_shared) + err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); + else + err = drm_exec_lock_obj(exec, &bo->ttm.base); + } return err; } @@ -1984,6 +1986,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, xe_exec_queue_last_fence_get(wait_exec_queue, vm); xe_sync_entry_signal(&syncs[i], NULL, fence); + dma_fence_put(fence); } } @@ -2064,7 +2067,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, struct drm_gem_object *obj = bo ? 
&bo->ttm.base : NULL; struct drm_gpuva_ops *ops; struct drm_gpuva_op *__op; - struct xe_vma_op *op; struct drm_gpuvm_bo *vm_bo; int err; @@ -2111,15 +2113,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, if (IS_ERR(ops)) return ops; -#ifdef TEST_VM_ASYNC_OPS_ERROR - if (operation & FORCE_ASYNC_OP_ERROR) { - op = list_first_entry_or_null(&ops->list, struct xe_vma_op, - base.entry); - if (op) - op->inject_error = true; - } -#endif - drm_gpuva_for_each_op(__op, ops) { struct xe_vma_op *op = gpuva_op_to_vma_op(__op); @@ -2199,8 +2192,10 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma) return SZ_1G; else if (vma->gpuva.flags & XE_VMA_PTE_2M) return SZ_2M; + else if (vma->gpuva.flags & XE_VMA_PTE_4K) + return SZ_4K; - return SZ_4K; + return SZ_1G; /* Uninitialized, used max size */ } static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) @@ -2530,13 +2525,25 @@ retry_userptr: } drm_exec_fini(&exec); - if (err == -EAGAIN && xe_vma_is_userptr(vma)) { + if (err == -EAGAIN) { lockdep_assert_held_write(&vm->lock); - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); - if (!err) - goto retry_userptr; - trace_xe_vma_fail(vma); + if (op->base.op == DRM_GPUVA_OP_REMAP) { + if (!op->remap.unmap_done) + vma = gpuva_to_vma(op->base.remap.unmap->va); + else if (op->remap.prev) + vma = op->remap.prev; + else + vma = op->remap.next; + } + + if (xe_vma_is_userptr(vma)) { + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); + if (!err) + goto retry_userptr; + + trace_xe_vma_fail(vma); + } } return err; @@ -2548,13 +2555,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) lockdep_assert_held_write(&vm->lock); -#ifdef TEST_VM_ASYNC_OPS_ERROR - if (op->inject_error) { - op->inject_error = false; - return -ENOMEM; - } -#endif - switch (op->base.op) { case DRM_GPUVA_OP_MAP: ret = __xe_vma_op_execute(vm, op->map.vma, op); @@ -2669,7 +2669,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, { int i; - for (i = num_ops_list - 1; i; ++i) { + for (i = num_ops_list - 1; i >= 0; --i) { struct drm_gpuva_ops *__ops = ops[i]; struct drm_gpuva_op *__op; @@ -2714,16 +2714,9 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, return 0; } -#ifdef TEST_VM_ASYNC_OPS_ERROR -#define SUPPORTED_FLAGS \ - (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \ - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff) -#else #define SUPPORTED_FLAGS \ (DRM_XE_VM_BIND_FLAG_READONLY | \ - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \ - 0xffff) -#endif + DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL) #define XE_64K_PAGE_MASK 0xffffull #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 1fec66ae2eb2..5ac9c5bebabc 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -21,9 +21,6 @@ struct xe_bo; struct xe_sync_entry; struct xe_vm; -#define TEST_VM_ASYNC_OPS_ERROR -#define FORCE_ASYNC_OP_ERROR BIT(31) - #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS #define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1) #define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2) @@ -360,11 +357,6 @@ struct xe_vma_op { /** @flags: operation flags */ enum xe_vma_op_flags flags; -#ifdef TEST_VM_ASYNC_OPS_ERROR - /** @inject_error: inject error to test async op error handling */ - bool inject_error; -#endif - union { /** @map: VMA map operation specific data */ struct xe_vma_op_map map; diff --git a/drivers/hid/hid-logitech-hidpp.c 
b/drivers/hid/hid-logitech-hidpp.c index 6ef0c88e3e60..d2f3f234f29d 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -203,6 +203,8 @@ struct hidpp_device { struct hidpp_scroll_counter vertical_wheel_counter; u8 wireless_feature_index; + + bool connected_once; }; /* HID++ 1.0 error codes */ @@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) hidpp->protocol_minor = response.rap.params[1]; print_version: - hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", - hidpp->protocol_major, hidpp->protocol_minor); + if (!hidpp->connected_once) { + hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); + hidpp->connected_once = true; + } else + hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); return 0; } @@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work) /* Get device version to check if it is connected */ ret = hidpp_root_get_protocol_version(hidpp); if (ret) { - hid_info(hidpp->hid_dev, "Disconnected\n"); + hid_dbg(hidpp->hid_dev, "Disconnected\n"); if (hidpp->battery.ps) { hidpp->battery.online = false; hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index fd5b0637dad6..3e91e4d6ba6f 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xcddc) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index aa6cb033bb06..03d5601ce807 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev, spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags); list_for_each_entry(cl, &ishtp_dev->cl_list, link) { cl->state = ISHTP_CL_DISCONNECTED; + if (warm_reset && cl->device->reference_count) + continue; /* * Wake any pending process. 
The waiter would check dev->state diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c index 82c907f01bd3..8a7f2f6a4f86 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.c +++ b/drivers/hid/intel-ish-hid/ishtp/client.c @@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl) list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { list_del(&rb->list); - ishtp_io_rb_free(rb); + spin_lock(&cl->free_list_spinlock); + list_add_tail(&rb->list, &cl->free_rb_list.list); + spin_unlock(&cl->free_list_spinlock); } spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags); } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b613f11ed949..2bc45b24075c 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2087,7 +2087,7 @@ static int wacom_allocate_inputs(struct wacom *wacom) return 0; } -static int wacom_register_inputs(struct wacom *wacom) +static int wacom_setup_inputs(struct wacom *wacom) { struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); @@ -2106,10 +2106,6 @@ static int wacom_register_inputs(struct wacom *wacom) input_free_device(pen_input_dev); wacom_wac->pen_input = NULL; pen_input_dev = NULL; - } else { - error = input_register_device(pen_input_dev); - if (error) - goto fail; } error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac); @@ -2118,10 +2114,6 @@ static int wacom_register_inputs(struct wacom *wacom) input_free_device(touch_input_dev); wacom_wac->touch_input = NULL; touch_input_dev = NULL; - } else { - error = input_register_device(touch_input_dev); - if (error) - goto fail; } error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); @@ -2130,7 +2122,34 @@ static int wacom_register_inputs(struct wacom *wacom) input_free_device(pad_input_dev); wacom_wac->pad_input = NULL; pad_input_dev = NULL; - } else { + } + + return 0; +} + +static int wacom_register_inputs(struct wacom *wacom) +{ + struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; + struct wacom_wac *wacom_wac = &(wacom->wacom_wac); + int error = 0; + + pen_input_dev = wacom_wac->pen_input; + touch_input_dev = wacom_wac->touch_input; + pad_input_dev = wacom_wac->pad_input; + + if (pen_input_dev) { + error = input_register_device(pen_input_dev); + if (error) + goto fail; + } + + if (touch_input_dev) { + error = input_register_device(touch_input_dev); + if (error) + goto fail; + } + + if (pad_input_dev) { error = input_register_device(pad_input_dev); if (error) goto fail; @@ -2383,6 +2402,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) if (error) goto fail; + error = wacom_setup_inputs(wacom); + if (error) + goto fail; + + if (features->type == HID_GENERIC) + connect_mask |= HID_CONNECT_DRIVER; + + /* Regular HID work starts now */ + error = hid_hw_start(hdev, connect_mask); + if (error) { + hid_err(hdev, "hw start failed\n"); + goto fail; + } + error = wacom_register_inputs(wacom); if (error) goto fail; @@ -2397,16 +2430,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) goto fail; } - if (features->type == HID_GENERIC) - connect_mask |= HID_CONNECT_DRIVER; - - /* Regular HID work starts now */ - error = hid_hw_start(hdev, connect_mask); - if (error) { - hid_err(hdev, "hw start failed\n"); - goto fail; - } - if (!wireless) { /* Note that if query fails it is not a hard failure */ wacom_query_tablet_data(wacom); 
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index da8a01fedd39..fbe10fbc5769 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2575,7 +2575,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev, wacom_wac->hid_data.tipswitch); input_report_key(input, wacom_wac->tool[0], sense); if (wacom_wac->serial[0]) { - input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); + /* + * xf86-input-wacom does not accept a serial number + * of '0'. Report the low 32 bits if possible, but + * if they are zero, report the upper ones instead. + */ + __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu; + __u32 serial_hi = wacom_wac->serial[0] >> 32; + input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi)); input_report_abs(input, ABS_MISC, sense ? id : 0); } diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index f6e1e55e8292..4acc1858d8ac 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -195,6 +195,8 @@ struct aspeed_pwm_tacho_data { u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS]; struct aspeed_cooling_device *cdev[8]; const struct attribute_group *groups[3]; + /* protects access to shared ASPEED_PTCR_RESULT */ + struct mutex tach_lock; }; enum type { TYPEM, TYPEN, TYPEO }; @@ -529,6 +531,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, u8 fan_tach_ch_source, type, mode, both; int ret; + mutex_lock(&priv->tach_lock); + regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0); regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch); @@ -546,6 +550,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, ASPEED_RPM_STATUS_SLEEP_USEC, usec); + mutex_unlock(&priv->tach_lock); + /* return -ETIMEDOUT if we didn't get an answer. */ if (ret) return ret; @@ -915,6 +921,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + mutex_init(&priv->tach_lock); priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs, &aspeed_pwm_tacho_regmap_config); if (IS_ERR(priv->regmap)) diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index ba82d1e79c13..b8fc8d1ef20d 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ -#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */ +#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) @@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev, } static int create_core_attrs(struct temp_data *tdata, struct device *dev, - int attr_no) + int index) { int i; static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, @@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, }; for (i = 0; i < tdata->attr_size; i++) { + /* + * We map the attr number to core id of the CPU + * The attr number is always core id + 2 + * The Pkgtemp will always show up as temp1_*, if available + */ + int attr_no = tdata->is_pkg_data ? 
1 : tdata->cpu_core_id + 2; + snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, "temp%d_%s", attr_no, suffixes[i]); sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; tdata->sd_attrs[i].dev_attr.attr.mode = 0444; tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; - tdata->sd_attrs[i].index = attr_no; + tdata->sd_attrs[i].index = index; tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr; } tdata->attr_group.attrs = tdata->attrs; @@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; - int err, index, attr_no; + int err, index; if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) return 0; /* - * Find attr number for sysfs: - * We map the attr number to core id of the CPU - * The attr number is always core id + 2 - * The Pkgtemp will always show up as temp1_*, if available + * Get the index of tdata in pdata->core_data[] + * tdata for package: pdata->core_data[1] + * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1] */ if (pkg_flag) { - attr_no = PKG_SYSFS_ATTR_NO; + index = PKG_SYSFS_ATTR_NO; } else { - index = ida_alloc(&pdata->ida, GFP_KERNEL); + index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL); if (index < 0) return index; - pdata->cpu_map[index] = topology_core_id(cpu); - attr_no = index + BASE_SYSFS_ATTR_NO; - } - if (attr_no > MAX_CORE_DATA - 1) { - err = -ERANGE; - goto ida_free; + pdata->cpu_map[index] = topology_core_id(cpu); + index += BASE_SYSFS_ATTR_NO; } tdata = init_temp_data(cpu, pkg_flag); @@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, if (get_ttarget(tdata, &pdev->dev) >= 0) tdata->attr_size++; - pdata->core_data[attr_no] = tdata; + pdata->core_data[index] = tdata; /* Create sysfs interfaces */ - err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no); + err = create_core_attrs(tdata, pdata->hwmon_dev, index); if (err) goto exit_free; return 0; exit_free: - pdata->core_data[attr_no] = NULL; + pdata->core_data[index] = NULL; kfree(tdata); ida_free: if (!pkg_flag) - ida_free(&pdata->ida, index); + ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO); return err; } diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 3757b9391e60..aa0ee8ecd6f2 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -90,10 +90,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o obj-$(CONFIG_I2C_OMAP) += i2c-omap.o obj-$(CONFIG_I2C_OWL) += i2c-owl.o -i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o -obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o -i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o -obj-$(CONFIG_I2C_APPLE) += i2c-apple.o +obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o +obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o obj-$(CONFIG_I2C_PNX) += i2c-pnx.o obj-$(CONFIG_I2C_PXA) += i2c-pxa.o diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 3932e8d96a17..2c36b36d7d51 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -498,11 +498,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, /* Set block buffer mode */ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); - inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ - if 
(read_write == I2C_SMBUS_WRITE) { len = data->block[0]; outb_p(len, SMBHSTDAT0(priv)); + inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ for (i = 0; i < len; i++) outb_p(data->block[i+1], SMBBLKDAT(priv)); } @@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, } data->block[0] = len; + inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ for (i = 0; i < len; i++) data->block[i + 1] = inb_p(SMBBLKDAT(priv)); } diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c index 7d54a9f34c74..bd8becbdeeb2 100644 --- a/drivers/i2c/busses/i2c-pasemi-core.c +++ b/drivers/i2c/busses/i2c-pasemi-core.c @@ -369,6 +369,7 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus) return 0; } +EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe); irqreturn_t pasemi_irq_handler(int irq, void *dev_id) { @@ -378,3 +379,8 @@ irqreturn_t pasemi_irq_handler(int irq, void *dev_id) complete(&smbus->irq_completion); return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(pasemi_irq_handler); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Olof Johansson <olof@lixom.net>"); +MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver"); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 0d2e7171e3a6..da94df466e83 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -613,20 +613,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i peripheral.addr = msgs[i].addr; + ret = geni_i2c_gpi(gi2c, &msgs[i], &config, + &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); + if (ret) + goto err; + if (msgs[i].flags & I2C_M_RD) { ret = geni_i2c_gpi(gi2c, &msgs[i], &config, &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c); if (ret) goto err; - } - - ret = geni_i2c_gpi(gi2c, &msgs[i], &config, - &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); - if (ret) - goto err; - if (msgs[i].flags & I2C_M_RD) dma_async_issue_pending(gi2c->rx_c); + } + dma_async_issue_pending(gi2c->tx_c); timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 91adcac875a4..c9d7afe489e8 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -219,10 +219,12 @@ config BMA400 config BMA400_I2C tristate + select REGMAP_I2C depends on BMA400 config BMA400_SPI tristate + select REGMAP_SPI depends on BMA400 config BMC150_ACCEL diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index feb86fe6c422..62490424b6ae 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -1821,7 +1821,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st) { struct device *dev = &st->spi->dev; struct device_node *of_node = dev_of_node(dev); - struct clk_init_data init; + struct clk_init_data init = {}; const char *clk_name; int ret; @@ -1891,10 +1891,14 @@ static int ad4130_setup(struct iio_dev *indio_dev) return ret; /* - * Configure all GPIOs for output. If configured, the interrupt function - * of P2 takes priority over the GPIO out function. + * Configure unused GPIOs for output. If configured, the interrupt + * function of P2 takes priority over the GPIO out function. 
*/ - val = AD4130_IO_CONTROL_GPIO_CTRL_MASK; + val = 0; + for (i = 0; i < AD4130_MAX_GPIOS; i++) + if (st->pins_fn[i + AD4130_AIN2_P1] == AD4130_PIN_FN_NONE) + val |= FIELD_PREP(AD4130_IO_CONTROL_GPIO_CTRL_MASK, BIT(i)); + val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel); ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val); diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c index 57700f124803..700564305057 100644 --- a/drivers/iio/adc/ad7091r8.c +++ b/drivers/iio/adc/ad7091r8.c @@ -195,7 +195,7 @@ static int ad7091r8_gpio_setup(struct ad7091r_state *st) st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(st->reset_gpio)) - return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio), + return dev_err_probe(st->dev, PTR_ERR(st->reset_gpio), "Error on requesting reset GPIO\n"); if (st->reset_gpio) { diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig index 2de5494e7c22..b15b7a3b66d5 100644 --- a/drivers/iio/humidity/Kconfig +++ b/drivers/iio/humidity/Kconfig @@ -48,6 +48,18 @@ config HDC2010 To compile this driver as a module, choose M here: the module will be called hdc2010. +config HDC3020 + tristate "TI HDC3020 relative humidity and temperature sensor" + depends on I2C + select CRC8 + help + Say yes here to build support for the Texas Instruments + HDC3020, HDC3021 and HDC3022 relative humidity and temperature + sensors. + + To compile this driver as a module, choose M here: the module + will be called hdc3020. + config HID_SENSOR_HUMIDITY tristate "HID Environmental humidity sensor" depends on HID_SENSOR_HUB diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile index f19ff3de97c5..5fbeef299f61 100644 --- a/drivers/iio/humidity/Makefile +++ b/drivers/iio/humidity/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_AM2315) += am2315.o obj-$(CONFIG_DHT11) += dht11.o obj-$(CONFIG_HDC100X) += hdc100x.o obj-$(CONFIG_HDC2010) += hdc2010.o +obj-$(CONFIG_HDC3020) += hdc3020.o obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o hts221-y := hts221_core.o \ diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c index 4e3311170725..ed70415512f6 100644 --- a/drivers/iio/humidity/hdc3020.c +++ b/drivers/iio/humidity/hdc3020.c @@ -322,7 +322,7 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev, if (chan->type != IIO_TEMP) return -EINVAL; - *val = 16852; + *val = -16852; return IIO_VAL_INT; default: diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig index 83e53acfbe88..c7f5866a177d 100644 --- a/drivers/iio/imu/bno055/Kconfig +++ b/drivers/iio/imu/bno055/Kconfig @@ -8,6 +8,7 @@ config BOSCH_BNO055 config BOSCH_BNO055_SERIAL tristate "Bosch BNO055 attached via UART" depends on SERIAL_DEV_BUS + select REGMAP select BOSCH_BNO055 help Enable this to support Bosch BNO055 IMUs attached via UART. 
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 9a85752124dd..173dc00762a1 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1584,10 +1584,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) ret = iio_device_register_sysfs_group(indio_dev, &iio_dev_opaque->chan_attr_group); if (ret) - goto error_clear_attrs; + goto error_free_chan_attrs; return 0; +error_free_chan_attrs: + kfree(iio_dev_opaque->chan_attr_group.attrs); + iio_dev_opaque->chan_attr_group.attrs = NULL; error_clear_attrs: iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index 5cd27f04b45e..b6c4bef2a7bb 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -226,6 +226,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev, case HID_USAGE_SENSOR_TIME_TIMESTAMP: als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes, *(s64 *)raw_data); + ret = 0; break; default: break; diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c index 69938204456f..42b70cd42b39 100644 --- a/drivers/iio/magnetometer/rm3100-core.c +++ b/drivers/iio/magnetometer/rm3100-core.c @@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) struct rm3100_data *data; unsigned int tmp; int ret; + int samp_rate_index; indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) @@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp); if (ret < 0) return ret; + + samp_rate_index = tmp - RM3100_TMRC_OFFSET; + if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) { + dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n"); + return -EINVAL; + } /* Initializing max wait time, which is double conversion time. */ - data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2] - * 2; + data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2; /* Cycle count values may not be what we want. 
*/ if ((tmp - RM3100_TMRC_OFFSET) == 0) diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c index 433d6fac83c4..e8a5fed07e88 100644 --- a/drivers/iio/pressure/bmp280-spi.c +++ b/drivers/iio/pressure/bmp280-spi.c @@ -87,6 +87,7 @@ static const struct of_device_id bmp280_of_spi_match[] = { MODULE_DEVICE_TABLE(of, bmp280_of_spi_match); static const struct spi_device_id bmp280_spi_id[] = { + { "bmp085", (kernel_ulong_t)&bmp180_chip_info }, { "bmp180", (kernel_ulong_t)&bmp180_chip_info }, { "bmp181", (kernel_ulong_t)&bmp180_chip_info }, { "bmp280", (kernel_ulong_t)&bmp280_chip_info }, diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c index 20331e119beb..03d626776ba1 100644 --- a/drivers/interconnect/qcom/sc8180x.c +++ b/drivers/interconnect/qcom/sc8180x.c @@ -1372,6 +1372,7 @@ static struct qcom_icc_bcm bcm_mm0 = { static struct qcom_icc_bcm bcm_co0 = { .name = "CO0", + .keepalive = true, .num_nodes = 1, .nodes = { &slv_qns_cdsp_mem_noc } }; diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c index 629faa4c9aae..fc22cecf650f 100644 --- a/drivers/interconnect/qcom/sm8550.c +++ b/drivers/interconnect/qcom/sm8550.c @@ -2223,6 +2223,7 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8550", .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, }, }; diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c index b83de54577b6..b962e6c233ef 100644 --- a/drivers/interconnect/qcom/sm8650.c +++ b/drivers/interconnect/qcom/sm8650.c @@ -1160,7 +1160,7 @@ static struct qcom_icc_node qns_gemnoc_sf = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", - .enable_mask = BIT(3), + .enable_mask = BIT(0), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c index d19501d913b3..cbaf4f9c41be 100644 --- a/drivers/interconnect/qcom/x1e80100.c +++ b/drivers/interconnect/qcom/x1e80100.c @@ -1586,6 +1586,7 @@ static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 5559c943f03f..2b0b3175cea0 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -2,7 +2,7 @@ /* * Generic Broadcom Set Top Box Level 2 Interrupt controller driver * - * Copyright (C) 2014-2017 Broadcom + * Copyright (C) 2014-2024 Broadcom */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) generic_handle_domain_irq(b->domain, irq); } while (status); out: + /* Don't ack parent before all device writes are done */ + wmb(); + chained_irq_exit(chip, desc); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d097001c1e3e..53abd4779914 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); } +static bool rdists_support_shareable(void) +{ + return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE); +} + static u16 get_its_list(struct its_vm *vm) { struct its_node *its; @@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void) break; } val |= 
FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); - val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, - FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); - val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, - FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + if (rdists_support_shareable()) { + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + } val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); return val; @@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void) WARN_ON(!IS_ALIGNED(pa, psz)); val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } val |= GICR_VPROPBASER_4_1_Z; val |= GICR_VPROPBASER_4_1_VALID; @@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void) gicr_write_propbaser(val, rbase + GICR_PROPBASER); tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); - if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + if (!rdists_support_shareable()) tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { @@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void) gicr_write_pendbaser(val, rbase + GICR_PENDBASER); tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); - if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + if (!rdists_support_shareable()) tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { @@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int from, cpu = cpumask_first(mask_val); + struct cpumask common, *table_mask; unsigned long flags; + int from, cpu; /* * Changing affinity is mega expensive, so let's be as lazy as @@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d, * taken on any vLPI handling path that evaluates vpe->col_idx. */ from = vpe_to_cpuid_lock(vpe, &flags); - if (from == cpu) - goto out; - - vpe->col_idx = cpu; + table_mask = gic_data_rdist_cpu(from)->vpe_table_mask; /* - * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD - * is sharing its VPE table with the current one. + * If we are offered another CPU in the same GICv4.1 ITS + * affinity, pick this one. Otherwise, any CPU will do. */ - if (gic_data_rdist_cpu(cpu)->vpe_table_mask && - cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + if (table_mask && cpumask_and(&common, mask_val, table_mask)) + cpu = cpumask_test_cpu(from, &common) ? 
from : cpumask_first(&common); + else + cpu = cpumask_first(mask_val); + + if (from == cpu) goto out; + vpe->col_idx = cpu; + its_send_vmovp(vpe); its_vpe_db_proxy_move(vpe, from, cpu); @@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & GENMASK_ULL(51, 12); val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); - val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + } /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its) u32 ctlr; int err; + its_enable_quirks(its); + if (is_v4(its)) { if (!(its->typer & GITS_TYPER_VMOVP)) { err = its_compute_its_list_map(its); @@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node) if (!its) return -ENOMEM; - its_enable_quirks(its); err = its_probe_one(its); if (err) { its_node_destroy(its); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 1623cd779175..b3736bdd4b9f 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -241,7 +241,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, int ret; unsigned int i, type; unsigned long hwirq = 0; - struct eiointc *priv = domain->host_data; + struct eiointc_priv *priv = domain->host_data; ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); if (ret) diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index cda5838d2232..7942d8eb3d00 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent) /* Don't use devm_ioremap_resource, as we're accessing a shared region. */ priv->base = devm_ioremap(dev, res.start, resource_size(&res)); of_node_put(msgram_np); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + if (!priv->base) + return -ENOMEM; } else { /* Otherwise, fall back to simple MMIO. 
*/ priv->base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c index aebd3c12020b..c381c22135a2 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c @@ -725,6 +725,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx) unsigned int i; u32 status; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h index 4b6b28c05b89..b757f75edecf 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h @@ -450,6 +450,7 @@ struct rkisp1_debug { * @debug: debug params to be exposed on debugfs * @info: version-specific ISP information * @irqs: IRQ line numbers + * @irqs_enabled: the hardware is enabled and can cause interrupts */ struct rkisp1_device { void __iomem *base_addr; @@ -471,6 +472,7 @@ struct rkisp1_device { struct rkisp1_debug debug; const struct rkisp1_info *info; int irqs[RKISP1_NUM_IRQS]; + bool irqs_enabled; }; /* diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c index b6e47e2f1b94..4202642e0523 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c @@ -196,6 +196,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx) struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); u32 val, status; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c index f96f821a7b50..73cf08a74011 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c @@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev) { struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); + rkisp1->irqs_enabled = false; + /* Make sure the IRQ handler will see the above */ + mb(); + + /* + * Wait until any running IRQ handler has returned. The IRQ handler + * may get called even after this (as it's a shared interrupt line) + * but the 'irqs_enabled' flag will make the handler return immediately. 
+ */ + for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) { + if (rkisp1->irqs[il] == -1) + continue; + + /* Skip if the irq line is the same as previous */ + if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il]) + synchronize_irq(rkisp1->irqs[il]); + } + clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks); return pinctrl_pm_select_sleep_state(dev); } @@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev) if (ret) return ret; + rkisp1->irqs_enabled = true; + /* Make sure the IRQ handler will see the above */ + mb(); + return 0; } @@ -559,7 +581,7 @@ static int rkisp1_probe(struct platform_device *pdev) rkisp1->irqs[il] = irq; } - ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0, + ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED, dev_driver_string(dev), dev); if (ret) { dev_err(dev, "request irq failed: %d\n", ret); diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c index f00873d31c42..78a1f7a1499b 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c @@ -976,6 +976,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx) struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); u32 status, isp_err; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 2afe67ffa285..74d69ce22a33 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -319,6 +319,7 @@ config IR_PWM_TX tristate "PWM IR transmitter" depends on LIRC depends on PWM + depends on HIGH_RES_TIMERS depends on OF help Say Y if you want to use a PWM based IR transmitter. 
This is diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index fe17c7f98e81..52d82cbe7685 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (attr->attach_flags) return -EINVAL; - rcdev = rc_dev_get_from_fd(attr->target_fd); + rcdev = rc_dev_get_from_fd(attr->target_fd, true); if (IS_ERR(rcdev)) return PTR_ERR(rcdev); @@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr) if (IS_ERR(prog)) return PTR_ERR(prog); - rcdev = rc_dev_get_from_fd(attr->target_fd); + rcdev = rc_dev_get_from_fd(attr->target_fd, true); if (IS_ERR(rcdev)) { bpf_prog_put(prog); return PTR_ERR(rcdev); @@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) if (attr->query.query_flags) return -EINVAL; - rcdev = rc_dev_get_from_fd(attr->query.target_fd); + rcdev = rc_dev_get_from_fd(attr->query.target_fd, false); if (IS_ERR(rcdev)) return PTR_ERR(rcdev); diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c index 196806709259..69e630d85262 100644 --- a/drivers/media/rc/ir_toy.c +++ b/drivers/media/rc/ir_toy.c @@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count) sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP); if (err) { dev_err(irtoy->dev, "exit sample mode: %d\n", err); + kfree(buf); return err; } @@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count) sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND); if (err) { dev_err(irtoy->dev, "enter sample mode: %d\n", err); + kfree(buf); return err; } diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index a537734832c5..caad59f76793 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void) unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX); } -struct rc_dev *rc_dev_get_from_fd(int fd) +struct rc_dev *rc_dev_get_from_fd(int fd, bool write) { struct fd f = fdget(fd); struct lirc_fh *fh; @@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd) return ERR_PTR(-EINVAL); } + if (write && !(f.file->f_mode & FMODE_WRITE)) + return ERR_PTR(-EPERM); + fh = f.file->private_data; dev = fh->rc; diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index ef1e95e1af7f..7df949fc65e2 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev); void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc); int lirc_register(struct rc_dev *dev); void lirc_unregister(struct rc_dev *dev); -struct rc_dev *rc_dev_get_from_fd(int fd); +struct rc_dev *rc_dev_get_from_fd(int fd, bool write); #else static inline int lirc_dev_init(void) { return 0; } static inline void lirc_dev_exit(void) {} diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 2a2d949a9344..39f45c2b6de8 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq); int mmc_gpio_get_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; + int cansleep; if (!ctx || !ctx->ro_gpio) return -ENOSYS; - return gpiod_get_value_cansleep(ctx->ro_gpio); + cansleep = gpiod_cansleep(ctx->ro_gpio); + return cansleep ? 
+ gpiod_get_value_cansleep(ctx->ro_gpio) : + gpiod_get_value(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 7bfee28116af..d4a02184784a 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io return 0; } +static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot = sdhci_priv(host); + u32 scratch_32 = 0; + u8 scratch_8 = 0; + + chip = slot->chip; + + if (mode == MMC_POWER_OFF) { + /* UnLock WP */ + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); + scratch_8 &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); + + /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */ + pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32); + scratch_32 &= ~(O2_SD_SEL_DLL); + pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32); + + /* Lock WP */ + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); + scratch_8 |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); + } + + sdhci_set_power(host, mode, vdd); +} + static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) { struct sdhci_pci_chip *chip; @@ -1051,6 +1080,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_power = sdhci_pci_o2_set_power, }; const struct sdhci_pci_fixes sdhci_o2 = { diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 8c651fdee039..57f1729066f2 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c @@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void) module_init(arcnet_raw_init); module_exit(arcnet_raw_exit); +MODULE_DESCRIPTION("ARCnet raw mode packet interface module"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 8c3ccc7c83cd..53d10a04d1bd 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -312,6 +312,7 @@ module_param(node, int, 0); module_param(io, int, 0); module_param(irq, int, 0); module_param_string(device, device, sizeof(device), 0); +MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver"); MODULE_LICENSE("GPL"); static struct net_device *my_dev; diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index c09b567845e1..7a0a79973769 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void) module_init(capmode_module_init); module_exit(capmode_module_exit); +MODULE_DESCRIPTION("ARCnet CAP mode packet interface module"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 7b5c8bb02f11..c5e571ec94c9 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -61,6 +61,7 @@ module_param(timeout, int, 0); module_param(backplane, int, 0); module_param(clockp, int, 0); module_param(clockm, int, 0); +MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver"); MODULE_LICENSE("GPL"); static void led_tx_set(struct led_classdev *led_cdev, diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c 
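The slot-gpio change just above stops hard-coding the sleeping GPIO accessor for the write-protect pin and picks the correct call at run time. Reduced to its core (desc is a placeholder descriptor, not the mmc code itself):

#include <linux/gpio/consumer.h>

static int read_write_protect_pin(struct gpio_desc *desc)
{
	/* GPIOs behind I2C/SPI expanders can sleep and need the _cansleep
	 * accessor; memory-mapped GPIOs can be read atomically, which also
	 * keeps callers in atomic context happy. */
	if (gpiod_cansleep(desc))
		return gpiod_get_value_cansleep(desc);

	return gpiod_get_value(desc);
}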
index 06e1651b594b..a0053e3992a3 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found); EXPORT_SYMBOL(com20020_netdev_ops); #endif +MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver"); MODULE_LICENSE("GPL"); #ifdef MODULE diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index dc3253b318da..75f08aa7528b 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -97,6 +97,7 @@ module_param(backplane, int, 0); module_param(clockp, int, 0); module_param(clockm, int, 0); +MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver"); MODULE_LICENSE("GPL"); /*====================================================================*/ diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 37b47749fc8b..3b463fbc6402 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */ module_param_hw(io, int, ioport, 0); module_param_hw(irq, int, irq, 0); module_param_string(device, device, sizeof(device), 0); +MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver"); MODULE_LICENSE("GPL"); #ifndef MODULE diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index f49dae194284..b3b287c16561 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum, TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); } +MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver"); MODULE_LICENSE("GPL"); static int __init com90xx_init(void) diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index a7752a5b647f..46519ca63a0a 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void) module_init(arcnet_rfc1051_init); module_exit(arcnet_rfc1051_exit); +MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module"); MODULE_LICENSE("GPL"); /* Determine a packet's protocol ID. diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index a4c856282674..0edf35d971c5 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -35,6 +35,7 @@ #include "arcdevice.h" +MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module"); MODULE_LICENSE("GPL"); static __be16 type_trans(struct sk_buff *skb, struct net_device *dev); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4e0600c7b050..a11748b8d69b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1819,6 +1819,8 @@ void bond_xdp_set_features(struct net_device *bond_dev) bond_for_each_slave(bond, slave, iter) val &= slave->dev->xdp_features; + val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY; + xdp_set_features_flag(bond_dev, val); } @@ -5909,9 +5911,6 @@ void bond_setup(struct net_device *bond_dev) if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) bond_dev->features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ - - if (bond_xdp_check(bond)) - bond_dev->xdp_features = NETDEV_XDP_ACT_MASK; } /* Destroy a bonding device. 
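The string of one-line hunks above adds the MODULE_DESCRIPTION() tag the ARCnet modules were missing; modpost warns about its absence in W=1 builds. For reference, the minimal metadata block a module is expected to carry (the demo_ names are placeholders):

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_DESCRIPTION("One-line summary of what the module does");
MODULE_LICENSE("GPL");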
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 036d85ef07f5..dfdc039d92a6 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], /* Neither of TDC parameters nor TDC flags are * provided: do calculation */ - can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming, + can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, &priv->ctrlmode, priv->ctrlmode_supported); } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly * turned off. TDC is disabled: do nothing diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c index 237066d30704..14ca42491512 100644 --- a/drivers/net/dsa/dsa_loop_bdinfo.c +++ b/drivers/net/dsa/dsa_loop_bdinfo.c @@ -32,4 +32,5 @@ static int __init dsa_loop_bdinfo_init(void) } arch_initcall(dsa_loop_bdinfo_init) +MODULE_DESCRIPTION("DSA mock-up switch driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c index cdbf053b5376..0050c5894563 100644 --- a/drivers/net/ethernet/amd/pds_core/main.c +++ b/drivers/net/ethernet/amd/pds_core/main.c @@ -451,6 +451,9 @@ static void pdsc_remove(struct pci_dev *pdev) static void pdsc_stop_health_thread(struct pdsc *pdsc) { + if (pdsc->pdev->is_virtfn) + return; + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->health_work.func) cancel_work_sync(&pdsc->health_work); @@ -458,6 +461,9 @@ static void pdsc_stop_health_thread(struct pdsc *pdsc) static void pdsc_restart_health_thread(struct pdsc *pdsc) { + if (pdsc->pdev->is_virtfn) + return; + timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0); mod_timer(&pdsc->wdtimer, jiffies + 1); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index abd4832e4ed2..5acb3e16b567 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) return 0; err_exit_hwts_rx: - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); err_exit_ptp_rx: aq_ring_free(&aq_ptp->ptp_rx); err_exit_ptp_tx: @@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic) aq_ring_free(&aq_ptp->ptp_tx); aq_ring_free(&aq_ptp->ptp_rx); - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); aq_ptp_skb_ring_release(&aq_ptp->skb_ring); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index cda8597b4e14..f7433abd6591 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self) } } +void aq_ring_hwts_rx_free(struct aq_ring_s *self) +{ + if (!self) + return; + + if (self->dx_ring) { + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size + AQ_CFG_RXDS_DEF, + self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } +} + unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) { unsigned int count; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 52847310740a..d627ace850ff 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self); int 
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, unsigned int size, unsigned int dx_size); +void aq_ring_hwts_rx_free(struct aq_ring_s *self); void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic); unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 53e542881255..f59557b0cd51 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -684,6 +684,8 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf) intf->rx_buf_order = get_order(RING_BUFFER_SIZE); buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); + if (!buffer_pg) + return -ENOMEM; dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, DMA_FROM_DEVICE); @@ -1092,6 +1094,7 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) return 0; err_reclaim_tx: + netif_napi_del(&intf->tx_napi); bcmasp_reclaim_free_all_tx(intf); err_phy_disconnect: if (phydev) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 31191b520b58..c32174484a96 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. */ static void -bnad_tx_cleanup(struct delayed_work *work) +bnad_tx_cleanup(struct work_struct *work) { struct bnad_tx_info *tx_info = - container_of(work, struct bnad_tx_info, tx_cleanup_work); + container_of(work, struct bnad_tx_info, tx_cleanup_work.work); struct bnad *bnad = NULL; struct bna_tcb *tcb; unsigned long flags; @@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. 
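The bnad conversion in this hunk gives the cleanup handlers the real work_func_t prototype and recovers their context with container_of() instead of casting the function pointer; such casts defeat prototype checking and trip kCFI-style indirect-call checks. A stripped-down version of the pattern (struct demo is invented), noting that a delayed_work is reached through its embedded .work member:

#include <linux/workqueue.h>
#include <linux/container_of.h>

struct demo {
	struct delayed_work cleanup_work;
};

static void demo_cleanup(struct work_struct *work)
{
	struct demo *d = container_of(work, struct demo, cleanup_work.work);

	/* ... perform the deferred cleanup on d ... */
	(void)d;
}

/* Queued with a matching prototype, no (work_func_t) cast:
 *	INIT_DELAYED_WORK(&d->cleanup_work, demo_cleanup);
 */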
*/ static void -bnad_rx_cleanup(void *work) +bnad_rx_cleanup(struct work_struct *work) { struct bnad_rx_info *rx_info = container_of(work, struct bnad_rx_info, rx_cleanup_work); @@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) } tx_info->tx = tx; - INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, - (work_func_t)bnad_tx_cleanup); + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup); /* Register ISR for the Tx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { @@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) rx_info->rx = rx; spin_unlock_irqrestore(&bnad->bna_lock, flags); - INIT_WORK(&rx_info->rx_cleanup_work, - (work_func_t)(bnad_rx_cleanup)); + INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup); /* * Init NAPI, so that state is set to NAPI_STATE_SCHED, diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 9aeff2b37a61..64eadd320798 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -719,17 +719,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, struct xdp_buff *xdp, - struct netdev_queue *tx_nq, struct tsnep_tx *tx) + struct netdev_queue *tx_nq, struct tsnep_tx *tx, + bool zc) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); bool xmit; + u32 type; if (unlikely(!xdpf)) return false; + /* no page pool for zero copy */ + if (zc) + type = TSNEP_TX_TYPE_XDP_NDO; + else + type = TSNEP_TX_TYPE_XDP_TX; + __netif_tx_lock(tx_nq, smp_processor_id()); - xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); + xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); /* Avoid transmit queue timeout since we share it with the slow path */ if (xmit) @@ -1273,7 +1281,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) goto out_failure; *status |= TSNEP_XDP_TX; return true; @@ -1323,7 +1331,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) goto out_failure; *status |= TSNEP_XDP_TX; return true; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 9d88ed6105fd..8db1eb0c1768 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1523,7 +1523,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, reg = rd32(hw, I40E_PRTDCB_RETSTCC(i)); reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | - I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); + I40E_PRTDCB_RETSTCC_ETSTC_MASK); reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK, bw_share[i]); reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK, diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 6b60dc9b7736..d76497566e40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -43,7 +43,7 @@ #define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 #define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) #define I40E_LLDP_TLV_OUI_SHIFT 8 -#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << 
I40E_LLDP_TLV_OUI_SHIFT) +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT) /* Defines for IEEE ETS TLV */ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6e7fd473abfd..54eb55464e31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4926,27 +4926,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int pf_q, err, q_end; + u32 pf_q, tx_q_end, rx_q_end; /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); - q_end = vsi->base_queue + vsi->num_queue_pairs; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) - i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); + tx_q_end = vsi->base_queue + + vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { - err = i40e_control_wait_rx_q(pf, pf_q, false); - if (err) - dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d disable timeout\n", - vsi->seid, pf_q); - } + rx_q_end = vsi->base_queue + vsi->num_queue_pairs; + for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) + i40e_control_rx_q(pf, pf_q, false); msleep(I40E_DISABLE_TX_GAP_MSEC); - pf_q = vsi->base_queue; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); i40e_vsi_wait_queues_disabled(vsi); @@ -5360,7 +5356,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; - for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) { ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 908cdbd3ec5d..b34c71770887 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2848,6 +2848,24 @@ error_param: (u8 *)&stats, sizeof(stats)); } +/** + * i40e_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool i40e_can_vf_change_mac(struct i40e_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !vf->trusted) + return false; + + return true; +} + #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ (num_ports)) @@ -2907,8 +2925,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. 
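The i40e_dcb hunk a little further up fixes a register update that cleared bits with a *_SHIFT constant where the *_MASK was intended. The usual shape of such a read-modify-write, with invented DEMO_ names:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

#define DEMO_FIELD_MASK		GENMASK(13, 8)

static u32 demo_update_field(u32 reg, u32 val)
{
	reg &= ~DEMO_FIELD_MASK;	/* clear with the mask, never the shift */
	reg |= FIELD_PREP(DEMO_FIELD_MASK, val);

	return reg;
}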
*/ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - !is_multicast_ether_addr(addr) && vf->pf_set_mac && + if (!i40e_can_vf_change_mac(vf) && + !is_multicast_ether_addr(addr) && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); @@ -3114,19 +3132,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = -EINVAL; goto error_param; } - if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) - was_unimac_deleted = true; } vsi = pf->vsi[vf->lan_vsi_idx]; spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ - for (i = 0; i < al->num_elements; i++) + for (i = 0; i < al->num_elements; i++) { + const u8 *addr = al->list[i].addr; + + /* Allow to delete VF primary MAC only if it was not set + * administratively by PF or if VF is trusted. + */ + if (ether_addr_equal(addr, vf->default_lan_addr.addr) && + i40e_can_vf_change_mac(vf)) + was_unimac_deleted = true; + else + continue; + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } + } spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 2a25323105e5..467372d541d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport) } /** + * ice_pkg_has_lport_extract - check if lport extraction supported + * @hw: HW struct + */ +static bool ice_pkg_has_lport_extract(struct ice_hw *hw) +{ + int i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) { + u16 offset; + u8 fv_prot; + + ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i, + &fv_prot, &offset); + if (fv_prot == ICE_FV_PROT_MDID && + offset == ICE_LP_EXT_BUF_OFFSET) + return true; + } + return false; +} + +/** * ice_lag_find_primary - returns pointer to primary interfaces lag struct * @lag: local interfaces lag struct */ @@ -1206,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) } /** - * ice_lag_init_feature_support_flag - Check for NVM support for LAG + * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG * @pf: PF struct */ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) @@ -1219,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) else ice_clear_feature_support(pf, ICE_F_ROCE_LAG); - if (caps->sriov_lag) + if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw)) ice_set_feature_support(pf, ICE_F_SRIOV_LAG); else ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index ede833dfa658..183b38792ef2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -17,6 +17,9 @@ enum ice_lag_role { #define ICE_LAG_INVALID_PORT 0xFF #define ICE_LAG_RESET_RETRIES 5 +#define ICE_SW_DEFAULT_PROFILE 0 +#define ICE_FV_PROT_MDID 255 +#define ICE_LP_EXT_BUF_OFFSET 32 struct ice_pf; struct ice_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index 82bc54fec7f3..a2562f04267f 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -24,7 +24,7 @@ 
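The ICE_M() change just below, like the I40E_LLDP_TLV_OUI_MASK fix above and the ICE_OROM_VER_MASK fix that follows, forces mask literals to be unsigned. With a plain int literal, a mask that reaches bit 31 becomes a negative value, so widening it into a 64-bit type sign-extends it (and the signed shift itself is formally undefined behaviour). Illustrative macros, not the drivers':

/* (0xff << 24) is a negative int on common ABIs; stored into a u64 it
 * becomes 0xffffffffff000000 in practice.  The U suffix keeps the value
 * an unsigned 0xff000000. */
#define DEMO_MASK_BUGGY		(0xff << 24)
#define DEMO_MASK_FIXED		(0xffU << 24)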
#define rd64(a, reg) readq((a)->hw_addr + (reg)) #define ice_flush(a) rd32((a), GLGEN_STAT) -#define ICE_M(m, s) ((m) << (s)) +#define ICE_M(m, s) ((m ## U) << (s)) struct ice_dma_mem { void *va; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 41ab6d7bbd9e..a508e917ce5f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -1072,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_OROM_VER_BUILD_SHIFT 8 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) #define ICE_OROM_VER_SHIFT 24 -#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) +#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_1ST_NVM_BANK_PTR 0x42 #define ICE_SR_NVM_BANK_SIZE 0x43 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index a2b759531cb7..3c2dc7bdebb5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -637,7 +637,7 @@ struct igb_adapter { struct timespec64 period; } perout[IGB_N_PEROUT]; - char fw_version[32]; + char fw_version[48]; #ifdef CONFIG_IGB_HWMON struct hwmon_buff *igb_hwmon_buff; bool ets; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4df8d4153aa5..cebb44f51d5f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fw_version fw; - char *lbuf; igb_get_fw_version(hw, &fw); @@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter) case e1000_i210: case e1000_i211: if (!(igb_get_flash_presence_i210(hw))) { - lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, - fw.invm_img_type); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); break; } fallthrough; default: /* if option rom is valid, display its version too */ if (fw.or_valid) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", - fw.eep_major, fw.eep_minor, - fw.etrack_id, fw.or_major, fw.or_build, - fw.or_patch); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); /* no option rom */ } else if (fw.etrack_id != 0X0000) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, - fw.etrack_id); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); } else { - lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major, - fw.eep_minor, fw.eep_build); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } - - /* the truncate happens here if it doesn't fit */ - strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); - kfree(lbuf); } /** diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 7cd8716d2ffa..861f37076861 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw) /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, 
&mii_reg); mii_reg |= MII_CR_POWER_DOWN; - - /* Temporary workaround - should be removed when PHY will implement - * IEEE registers as properly - */ - /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); usleep_range(1000, 2000); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 167145bdcb75..e5d6156655ba 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) return 0; } -static int npc_mcam_verify_pf_func(struct rvu *rvu, - struct mcam_entry *entry_data, u8 intf, - u16 pcifunc) -{ - u16 pf_func, pf_func_mask; - - if (is_npc_intf_rx(intf)) - return 0; - - pf_func_mask = (entry_data->kw_mask[0] >> 32) & - NPC_KEX_PF_FUNC_MASK; - pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; - - pf_func = be16_to_cpu((__force __be16)pf_func); - if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || - ((pf_func & ~RVU_PFVF_FUNC_MASK) != - (pcifunc & ~RVU_PFVF_FUNC_MASK))) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -1850,8 +1828,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu) { struct npc_mcam *mcam = &rvu->hw->mcam; - kfree(mcam->bmap); - kfree(mcam->bmap_reverse); + bitmap_free(mcam->bmap); + bitmap_free(mcam->bmap_reverse); kfree(mcam->entry2pfvf_map); kfree(mcam->cntr2pfvf_map); kfree(mcam->entry2cntr_map); @@ -1904,21 +1882,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) mcam->pf_offset = mcam->nixlf_offset + nixlf_count; /* Allocate bitmaps for managing MCAM entries */ - mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap) return -ENOMEM; - mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap_reverse) goto free_bmap; mcam->bmap_fcnt = mcam->bmap_entries; /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ - mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); + if (!mcam->entry2pfvf_map) goto free_bmap_reverse; @@ -1941,21 +1918,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (err) goto free_entry_map; - mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr2pfvf_map) goto free_cntr_bmap; /* Alloc memory for MCAM entry to counter mapping and for tracking * counter's reference count. 
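The npc_mcam_rsrcs_init() rework in this hunk replaces open-coded kmalloc_array(BITS_TO_LONGS(nbits), sizeof(long), ...) allocations with bitmap_zalloc()/bitmap_free(), which size, zero and release the bitmap in one step (the other maps move to kcalloc() so they start zeroed, too). In isolation, with hypothetical callers:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *demo_alloc_map(unsigned int nbits)
{
	/* Zeroed bitmap of nbits bits; NULL on allocation failure. */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void demo_free_map(unsigned long *map)
{
	bitmap_free(map);	/* NULL-safe, like kfree() */
}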
*/ - mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); if (!mcam->entry2cntr_map) goto free_cntr_map; - mcam->cntr_refcnt = kmalloc_array(mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr_refcnt) goto free_entry_cntr_map; @@ -1988,9 +1965,9 @@ free_cntr_bmap: free_entry_map: kfree(mcam->entry2pfvf_map); free_bmap_reverse: - kfree(mcam->bmap_reverse); + bitmap_free(mcam->bmap_reverse); free_bmap: - kfree(mcam->bmap); + bitmap_free(mcam->bmap); return -ENOMEM; } @@ -2852,12 +2829,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - /* For AF installed rules, the nix_intf should be set to target NIX */ if (is_pffunc_af(req->hdr.pcifunc)) nix_intf = req->intf; @@ -3209,10 +3180,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - req->hdr.pcifunc)) - return NPC_MCAM_INVALID_REQ; - /* Try to allocate a MCAM entry */ entry_req.hdr.pcifunc = req->hdr.pcifunc; entry_req.contig = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 7ca6941ea0b9..02d0b707aea5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); - if (err) + if (err) { + kfree(sq->sg); + sq->sg = NULL; return err; + } } sq->head = 0; @@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + if (err) { + kfree(sq->sg); + sq->sg = NULL; + return err; + } + + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c index 18fed2b34fb1..928bf24d4b12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -389,7 +389,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev) struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev); struct mlx5_core_dev *mdev = mdpll->mdev; - cancel_delayed_work(&mdpll->work); + cancel_delayed_work_sync(&mdpll->work); mlx5_dpll_mdev_netdev_untrack(mdpll, mdev); destroy_workqueue(mdpll->wq); dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c index 41fa2523d91d..5f2cd9a8cf8f 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c @@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) /* Now, set PGIDs for each active LAG */ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { - struct net_device *bond = lan966x->ports[lag]->bond; + struct lan966x_port *port 
= lan966x->ports[lag]; int num_active_ports = 0; + struct net_device *bond; unsigned long bond_mask; u8 aggr_idx[16]; - if (!bond || (visited & BIT(lag))) + if (!port || !port->bond || (visited & BIT(lag))) continue; + bond = port->bond; bond_mask = lan966x_lag_get_mask(lan966x, bond); for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { struct lan966x_port *port = lan966x->ports[p]; + if (!port) + continue; + lan_wr(ANA_PGID_PGID_SET(bond_mask), lan966x, ANA_PGID(p)); if (port->lag_tx_active) diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index e522845c7c21..0d7d138d6e0d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); - if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { + if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) { if (entry->bridge_count || !nfp_flower_is_supported_bridge(netdev)) { nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 3b3210d823e8..f28e769e6fda 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) case NFP_NFD_VER_NFD3: netdev->netdev_ops = &nfp_nfd3_netdev_ops; netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; break; case NFP_NFD_VER_NFDK: netdev->netdev_ops = &nfp_nfdk_netdev_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 33b4c2856316..3f10c5365c80 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); const u32 barcfg_msix_xpb = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) | NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( NFP_CPP_TARGET_ISLAND_XPB); const u32 barcfg_explicit[4] = { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 54cd96b035d6..6f4776759863 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -579,6 +579,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done = ionic_cq_service(cq, budget, ionic_tx_service, NULL, NULL); + if (unlikely(!budget)) + return budget; + if (work_done < budget && napi_complete_done(napi, work_done)) { ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; @@ -607,6 +610,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) u32 work_done = 0; u32 flags = 0; + if (unlikely(!budget)) + return budget; + lif = 
cq->bound_q->lif; idev = &lif->ionic->idev; @@ -656,6 +662,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, ionic_tx_service, NULL, NULL); + if (unlikely(!budget)) + return budget; + rx_work_done = ionic_cq_service(rxcq, budget, ionic_rx_service, NULL, NULL); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0e3731f50fc2..f7566cfa45ca 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -772,29 +772,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) struct ravb_rx_desc *desc; struct sk_buff *skb; dma_addr_t dma_addr; + int rx_packets = 0; u8 desc_status; - int boguscnt; u16 pkt_len; u8 die_dt; int entry; int limit; + int i; entry = priv->cur_rx[q] % priv->num_rx_ring[q]; - boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; + limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; stats = &priv->stats[q]; - boguscnt = min(boguscnt, *quota); - limit = boguscnt; desc = &priv->gbeth_rx_ring[entry]; - while (desc->die_dt != DT_FEMPTY) { + for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) { /* Descriptor type must be checked before all other reads */ dma_rmb(); desc_status = desc->msc; pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; - if (--boguscnt < 0) - break; - /* We use 0-byte descriptors to mark the DMA mapping errors */ if (!pkt_len) continue; @@ -820,7 +816,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(&priv->napi[q], skb); - stats->rx_packets++; + rx_packets++; stats->rx_bytes += pkt_len; break; case DT_FSTART: @@ -848,7 +844,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) eth_type_trans(priv->rx_1st_skb, ndev); napi_gro_receive(&priv->napi[q], priv->rx_1st_skb); - stats->rx_packets++; + rx_packets++; stats->rx_bytes += pkt_len; break; } @@ -887,9 +883,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) desc->die_dt = DT_FEMPTY; } - *quota -= limit - (++boguscnt); - - return boguscnt <= 0; + stats->rx_packets += rx_packets; + *quota -= rx_packets; + return *quota == 0; } /* Packet receive function for Ethernet AVB */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 721c1f8e892f..5ba606a596e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -59,28 +59,51 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +struct stmmac_q_tx_stats { + u64_stats_t tx_bytes; + u64_stats_t tx_set_ic_bit; + u64_stats_t tx_tso_frames; + u64_stats_t tx_tso_nfrags; +}; + +struct stmmac_napi_tx_stats { + u64_stats_t tx_packets; + u64_stats_t tx_pkt_n; + u64_stats_t poll; + u64_stats_t tx_clean; + u64_stats_t tx_set_ic_bit; +}; + struct stmmac_txq_stats { - u64 tx_bytes; - u64 tx_packets; - u64 tx_pkt_n; - u64 tx_normal_irq_n; - u64 napi_poll; - u64 tx_clean; - u64 tx_set_ic_bit; - u64 tx_tso_frames; - u64 tx_tso_nfrags; - struct u64_stats_sync syncp; + /* Updates protected by tx queue lock. */ + struct u64_stats_sync q_syncp; + struct stmmac_q_tx_stats q; + + /* Updates protected by NAPI poll logic. 
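The ionic changes above implement the NAPI rule for a zero budget: netpoll can invoke the poll callback with budget == 0, in which case only Tx completions may be serviced and napi_complete_done() must not be called. A sketch of a poll routine honouring that, with demo_ stubs standing in for the real queue servicing:

#include <linux/netdevice.h>

static void demo_clean_tx(struct napi_struct *napi) { }
static int demo_clean_rx(struct napi_struct *napi, int budget) { return 0; }
static void demo_unmask_irq(struct napi_struct *napi) { }

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	demo_clean_tx(napi);

	/* budget == 0: Tx only, never complete the NAPI instance. */
	if (unlikely(!budget))
		return 0;

	work_done = demo_clean_rx(napi, budget);
	if (work_done < budget && napi_complete_done(napi, work_done))
		demo_unmask_irq(napi);

	return work_done;
}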
*/ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_tx_stats napi; } ____cacheline_aligned_in_smp; +struct stmmac_napi_rx_stats { + u64_stats_t rx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_pkt_n; + u64_stats_t poll; +}; + struct stmmac_rxq_stats { - u64 rx_bytes; - u64 rx_packets; - u64 rx_pkt_n; - u64 rx_normal_irq_n; - u64 napi_poll; - struct u64_stats_sync syncp; + /* Updates protected by NAPI poll logic. */ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_rx_stats napi; } ____cacheline_aligned_in_smp; +/* Updates on each CPU protected by not allowing nested irqs. */ +struct stmmac_pcpu_stats { + struct u64_stats_sync syncp; + u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES]; + u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES]; +}; + /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ @@ -205,6 +228,7 @@ struct stmmac_extra_stats { /* per queue statistics */ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; + struct stmmac_pcpu_stats __percpu *pcpu_stats; unsigned long rx_dropped; unsigned long rx_errors; unsigned long tx_dropped; @@ -216,6 +240,7 @@ struct stmmac_safety_stats { unsigned long mac_errors[32]; unsigned long mtl_errors[32]; unsigned long dma_errors[32]; + unsigned long dma_dpp_errors[32]; }; /* Number of fields in Safety Stats */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 137741b94122..b21d99faa2d0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; u32 v; @@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_TX_INT) { ret |= handle_tx; - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_TX_DMA_STOP_INT) @@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_RX_INT) { ret |= handle_rx; - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_RX_BUF_UA_INT) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9470d3fd2ded..0d185e54eb7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan)); - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + 
struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; if (dir == DMA_DIR_RX) @@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & DMA_CHAN_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 7907d62d3437..85e18f9a22f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status) int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); @@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, u32 value = readl(ioaddr + DMA_INTR_ENA); /* to schedule NAPI on real RIE event. 
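The stmmac rework running through these hunks moves the rx/tx interrupt counters into per-CPU storage guarded by u64_stats_sync, so the IRQ path only touches CPU-local data and the ethtool/stats readers sum over all CPUs. Boiled down to the write and read sides (the demo_ structure is not stmmac's):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64_stats_t rx_irq_n;
};

static void demo_irq_hit(struct demo_pcpu_stats __percpu *stats)
{
	struct demo_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_irq_n);
	u64_stats_update_end(&s->syncp);
}

static u64 demo_read_total(struct demo_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			val = u64_stats_read(&s->rx_irq_n);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		total += val;
	}

	return total;
}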
*/ if (likely(value & DMA_INTR_ENA_RIE)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } } if (likely(intr_status & DMA_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } if (unlikely(intr_status & DMA_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 207ff1799f2c..6a2c7d22df1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -303,6 +303,8 @@ #define XGMAC_RXCEIE BIT(4) #define XGMAC_TXCEIE BIT(0) #define XGMAC_MTL_ECC_INT_STATUS 0x000010cc +#define XGMAC_MTL_DPP_CONTROL 0x000010e0 +#define XGMAC_DPP_DISABLE BIT(0) #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 @@ -385,6 +387,7 @@ #define XGMAC_DCEIE BIT(1) #define XGMAC_TCEIE BIT(0) #define XGMAC_DMA_ECC_INT_STATUS 0x0000306c +#define XGMAC_DMA_DPP_INT_STATUS 0x00003074 #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) #define XGMAC_SPH BIT(24) #define XGMAC_PBLx8 BIT(16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index eb48211d9b0e..1af2f89a0504 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { { false, "UNKNOWN", "Unknown Error" }, /* 31 */ }; +#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" +#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" + +static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { + { true, "TDPES0", DPP_TX_ERR }, + { true, "TDPES1", DPP_TX_ERR }, + { true, "TDPES2", DPP_TX_ERR }, + { true, "TDPES3", DPP_TX_ERR }, + { true, "TDPES4", DPP_TX_ERR }, + { true, "TDPES5", DPP_TX_ERR }, + { true, "TDPES6", DPP_TX_ERR }, + { true, "TDPES7", DPP_TX_ERR }, + { true, "TDPES8", DPP_TX_ERR }, + { true, "TDPES9", DPP_TX_ERR }, + { true, "TDPES10", DPP_TX_ERR }, + { true, "TDPES11", DPP_TX_ERR }, + { true, "TDPES12", DPP_TX_ERR }, + { true, "TDPES13", DPP_TX_ERR }, + { true, "TDPES14", DPP_TX_ERR }, + { true, "TDPES15", DPP_TX_ERR }, + { true, "RDPES0", DPP_RX_ERR }, + { true, "RDPES1", DPP_RX_ERR }, + { true, "RDPES2", DPP_RX_ERR }, + { true, "RDPES3", DPP_RX_ERR }, + { true, "RDPES4", DPP_RX_ERR }, + { true, "RDPES5", DPP_RX_ERR }, + { true, "RDPES6", DPP_RX_ERR }, + { true, "RDPES7", DPP_RX_ERR }, + { true, "RDPES8", DPP_RX_ERR }, + { true, "RDPES9", DPP_RX_ERR }, + { true, "RDPES10", DPP_RX_ERR }, + { true, "RDPES11", DPP_RX_ERR }, + { true, "RDPES12", DPP_RX_ERR }, + { true, "RDPES13", DPP_RX_ERR }, + { true, "RDPES14", DPP_RX_ERR }, + { true, "RDPES15", DPP_RX_ERR }, +}; + static void dwxgmac3_handle_dma_err(struct net_device *ndev, void __iomem *ioaddr, bool correctable, struct stmmac_safety_stats *stats) @@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev, dwxgmac3_log_error(ndev, value, correctable, "DMA", dwxgmac3_dma_errors, STAT_OFF(dma_errors), 
stats); + + value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS); + + dwxgmac3_log_error(ndev, value, false, "DMA_DPP", + dwxgmac3_dma_dpp_errors, + STAT_OFF(dma_dpp_errors), stats); } static int @@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + /* 5. Enable Data Path Parity Protection */ + value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL); + /* already enabled by default, explicit enable it again */ + value &= ~XGMAC_DPP_DISABLE; + writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL); + return 0; } @@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, ret |= !corr; } - err = dma & (XGMAC_DEUIS | XGMAC_DECIS); + /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in + * DMA_Safety_Interrupt_Status, so we handle DMA Data Path + * Parity Errors here + */ + err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS); corr = dma & XGMAC_DECIS; if (err) { dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); @@ -930,6 +985,7 @@ static const struct dwxgmac3_error { { dwxgmac3_mac_errors }, { dwxgmac3_mtl_errors }, { dwxgmac3_dma_errors }, + { dwxgmac3_dma_dpp_errors }, }; static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 3cde695fec91..dd2ab6185c40 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; @@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, /* TX/RX NORMAL interrupts */ if (likely(intr_status & XGMAC_NIS)) { if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 42d27b97dd1d..ec44becf0e2d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -549,44 +549,79 @@ stmmac_set_pauseparam(struct net_device *netdev, } } +static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = 
u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + +static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int start; - int q, stat; - char *p; + int q; for (q = 0; q < tx_cnt; q++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; - struct stmmac_txq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n); - for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_tx_normal_irq_n(priv, q); } for (q = 0; q < rx_cnt; q++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; - struct stmmac_rxq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n); - for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_rx_normal_irq_n(priv, q); } } @@ -645,39 +680,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, pos = j; for (i = 0; i < rx_queues_count; i++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; - struct stmmac_rxq_stats snapshot; + struct stmmac_napi_rx_stats snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - - data[j++] += snapshot.rx_pkt_n; - data[j++] += snapshot.rx_normal_irq_n; - normal_irq_n += snapshot.rx_normal_irq_n; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + snapshot = rxq_stats->napi; + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&snapshot.rx_pkt_n); + n_irq = stmmac_get_rx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + napi_poll += u64_stats_read(&snapshot.poll); } pos = j; for (i = 0; i < tx_queues_count; i++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; - struct stmmac_txq_stats snapshot; + struct stmmac_napi_tx_stats napi_snapshot; + struct stmmac_q_tx_stats q_snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - 
snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - - data[j++] += snapshot.tx_pkt_n; - data[j++] += snapshot.tx_normal_irq_n; - normal_irq_n += snapshot.tx_normal_irq_n; - data[j++] += snapshot.tx_clean; - data[j++] += snapshot.tx_set_ic_bit; - data[j++] += snapshot.tx_tso_frames; - data[j++] += snapshot.tx_tso_nfrags; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + q_snapshot = txq_stats->q; + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + napi_snapshot = txq_stats->napi; + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n); + n_irq = stmmac_get_tx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + data[j++] += u64_stats_read(&napi_snapshot.tx_clean); + data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) + + u64_stats_read(&napi_snapshot.tx_set_ic_bit); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags); + napi_poll += u64_stats_read(&napi_snapshot.poll); } normal_irq_n += priv->xstats.rx_early_irq; data[j++] = normal_irq_n; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 25519952f754..75d029704503 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) struct xdp_desc xdp_desc; bool work_done = true; u32 tx_set_ic_bit = 0; - unsigned long flags; /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); @@ -2566,9 +2565,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit += tx_set_ic_bit; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); + u64_stats_update_end(&txq_stats->napi_syncp); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); @@ -2616,7 +2615,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; u32 tx_packets = 0, tx_errors = 0; - unsigned long flags; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2782,11 +2780,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, if (tx_q->dirty_tx != tx_q->cur_tx) *pending_packets = true; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_packets += tx_packets; - txq_stats->tx_pkt_n += tx_packets; - txq_stats->tx_clean++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); + u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); + u64_stats_inc(&txq_stats->napi.tx_clean); + u64_stats_update_end(&txq_stats->napi_syncp); priv->xstats.tx_errors += tx_errors; @@ -4213,7 +4211,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; - 
unsigned long flags; u32 pay_len, mss; dma_addr_t des; int i; @@ -4378,13 +4375,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; - txq_stats->tx_tso_frames++; - txq_stats->tx_tso_nfrags += nfrags; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); + u64_stats_inc(&txq_stats->q.tx_tso_frames); + u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4483,7 +4480,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; int entry, first_tx; - unsigned long flags; dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; @@ -4653,11 +4649,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4921,12 +4917,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, set_ic = false; if (set_ic) { - unsigned long flags; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); } stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -5076,7 +5071,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; int coe = priv->hw->rx_csum; - unsigned long flags; struct sk_buff *skb; u32 hash; @@ -5106,10 +5100,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n++; - rxq_stats->rx_bytes += len; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.rx_pkt_n); + u64_stats_add(&rxq_stats->napi.rx_bytes, len); + u64_stats_update_end(&rxq_stats->napi_syncp); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -5191,7 +5185,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct bpf_prog *prog; bool failure = false; - unsigned long flags; int xdp_status = 0; int status = 0; @@ -5346,9 +5339,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n += count; - 
u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5386,7 +5379,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; - unsigned long flags; int xdp_status = 0; int buf_sz; @@ -5646,11 +5638,11 @@ drain_data: stmmac_rx_refill(priv, queue); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_packets += rx_packets; - rxq_stats->rx_bytes += rx_bytes; - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); + u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5665,13 +5657,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) struct stmmac_priv *priv = ch->priv_data; struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; - unsigned long flags; int work_done; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5693,13 +5684,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) struct stmmac_txq_stats *txq_stats; bool pending_packets = false; u32 chan = ch->index; - unsigned long flags; int work_done; txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets); work_done = min(work_done, budget); @@ -5729,17 +5719,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) struct stmmac_rxq_stats *rxq_stats; struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; - unsigned long flags; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets); tx_done = min(tx_done, budget); @@ -7065,10 +7054,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 tx_bytes; do { 
- start = u64_stats_fetch_begin(&txq_stats->syncp); - tx_packets = txq_stats->tx_packets; - tx_bytes = txq_stats->tx_bytes; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; @@ -7080,10 +7072,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 rx_bytes; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - rx_packets = rxq_stats->rx_packets; - rx_bytes = rxq_stats->rx_bytes; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); + rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -7477,9 +7469,16 @@ int stmmac_dvr_probe(struct device *device, priv->dev = ndev; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) - u64_stats_init(&priv->xstats.rxq_stats[i].syncp); - for (i = 0; i < MTL_MAX_TX_QUEUES; i++) - u64_stats_init(&priv->xstats.txq_stats[i].syncp); + u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); + u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); + } + + priv->xstats.pcpu_stats = + devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats); + if (!priv->xstats.pcpu_stats) + return -ENOMEM; stmmac_set_ethtool_ops(ndev); priv->pause = pause; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index be01450c20dc..1530d13984d4 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -189,6 +189,7 @@ config TI_ICSSG_PRUETH select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER + depends on PTP_1588_CLOCK_OPTIONAL help Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. This subsystem is available starting with the AM65 platform. 
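[Note] The stmmac hunks above split each queue's statistics into a NAPI-owned half and a queue-owned half, and move the interrupt counters into per-CPU storage, which lets the hot paths drop the irqsave variants of the u64_stats helpers. The sketch below is illustrative only and uses hypothetical demo_* names; it shows the generic per-CPU u64_stats pattern the driver is converted to, not stmmac code itself.

        #include <linux/netdevice.h>
        #include <linux/u64_stats_sync.h>

        struct demo_pcpu_stats {
                struct u64_stats_sync syncp;    /* member must be named "syncp" for netdev_alloc_pcpu_stats() */
                u64_stats_t rx_irq_n;
        };

        /* Writer, called only from the IRQ handler that owns this counter.
         * Per-CPU ownership means no irqsave is needed around the update
         * section; on 64-bit the seqcount is compiled out entirely. */
        static void demo_count_rx_irq(struct demo_pcpu_stats __percpu *stats)
        {
                struct demo_pcpu_stats *s = this_cpu_ptr(stats);

                u64_stats_update_begin(&s->syncp);
                u64_stats_inc(&s->rx_irq_n);
                u64_stats_update_end(&s->syncp);
        }

        /* Reader: sum over all CPUs, retrying a CPU's snapshot if a writer
         * raced with the fetch (only possible on 32-bit). */
        static u64 demo_sum_rx_irq(struct demo_pcpu_stats __percpu *stats)
        {
                u64 total = 0;
                int cpu;

                for_each_possible_cpu(cpu) {
                        struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
                        unsigned int start;
                        u64 v;

                        do {
                                start = u64_stats_fetch_begin(&s->syncp);
                                v = u64_stats_read(&s->rx_irq_n);
                        } while (u64_stats_fetch_retry(&s->syncp, start));
                        total += v;
                }
                return total;
        }

        /* Allocation is device-managed, mirroring the probe() hunk above:
         *      stats = devm_netdev_alloc_pcpu_stats(dev, struct demo_pcpu_stats);
         *      if (!stats)
         *              return -ENOMEM;
         */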
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ea85c6dd5484..c0a5abd8d9a8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } } + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 498c50c6d1a7..087dcb67505a 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->slave_num); return; } + + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index bcccf43d368b..dbbea9146040 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts) freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC)); } +static void cpts_clk_unregister(void *clk) +{ + clk_hw_unregister_mux(clk); +} + +static void cpts_clk_del_provider(void *np) +{ + of_clk_del_provider(np); +} + static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) { struct device_node *refclk_np; @@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) goto mux_fail; } - ret = devm_add_action_or_reset(cpts->dev, - (void(*)(void *))clk_hw_unregister_mux, - clk_hw); + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw); if (ret) { dev_err(cpts->dev, "add clkmux unreg action %d", ret); goto mux_fail; @@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) if (ret) goto mux_fail; - ret = devm_add_action_or_reset(cpts->dev, - (void(*)(void *))of_clk_del_provider, + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider, refclk_np); if (ret) { dev_err(cpts->dev, "add clkmux provider unreg action %d", ret); diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index 2b6a607ac0b7..a273362c9e70 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -153,6 +153,7 @@ static const struct pci_device_id skfddi_pci_tbl[] = { { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl); +MODULE_DESCRIPTION("SysKonnect FDDI PCI driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>"); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 273bd8a20122..11831a1c9762 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,6 +42,10 @@ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) +/* Macros to define the context of vf registration */ +#define VF_REG_IN_PROBE 1 +#define VF_REG_IN_NOTIFIER 2 + static unsigned int ring_size __ro_after_init = 128; module_param(ring_size, uint, 0444); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); @@ -2185,7 +2189,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) } static int netvsc_vf_join(struct net_device *vf_netdev, - struct net_device *ndev) + struct net_device *ndev, int context) { struct net_device_context *ndev_ctx = netdev_priv(ndev); int ret; @@ -2208,7 +2212,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto upper_link_failed; } - 
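[Note] The cpts hunks above replace casts such as (void (*)(void *))clk_hw_unregister_mux with small named wrapper functions, so the cleanup action passed to devm_add_action_or_reset() genuinely has the void (*)(void *) prototype the devres core calls it with; indirect calls through a mismatched prototype are rejected under kernel control-flow-integrity checking. A minimal sketch of the wrapper pattern, with hypothetical demo_* names:

        #include <linux/device.h>
        #include <linux/clk-provider.h>

        static void demo_clk_unregister(void *data)
        {
                clk_hw_unregister_mux(data);    /* typed wrapper, no function-pointer cast */
        }

        static int demo_register_mux_cleanup(struct device *dev, struct clk_hw *hw)
        {
                /* Runs demo_clk_unregister(hw) when the device is unbound,
                 * or immediately if registering the action fails. */
                return devm_add_action_or_reset(dev, demo_clk_unregister, hw);
        }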
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + /* If this registration is called from probe context vf_takeover + * is taken care of later in probe itself. + */ + if (context == VF_REG_IN_NOTIFIER) + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2346,7 +2354,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev) return NOTIFY_DONE; } -static int netvsc_register_vf(struct net_device *vf_netdev) +static int netvsc_register_vf(struct net_device *vf_netdev, int context) { struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; @@ -2386,7 +2394,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - if (netvsc_vf_join(vf_netdev, ndev) != 0) + if (netvsc_vf_join(vf_netdev, ndev, context) != 0) return NOTIFY_DONE; dev_hold(vf_netdev); @@ -2484,10 +2492,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_OK; } +static int check_dev_is_matching_vf(struct net_device *event_ndev) +{ + /* Skip NetVSC interfaces */ + if (event_ndev->netdev_ops == &device_ops) + return -ENODEV; + + /* Avoid non-Ethernet type devices */ + if (event_ndev->type != ARPHRD_ETHER) + return -ENODEV; + + /* Avoid Vlan dev with same MAC registering as VF */ + if (is_vlan_dev(event_ndev)) + return -ENODEV; + + /* Avoid Bonding master dev with same MAC registering as VF */ + if (netif_is_bond_master(event_ndev)) + return -ENODEV; + + return 0; +} + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { - struct net_device *net = NULL; + struct net_device *net = NULL, *vf_netdev; struct net_device_context *net_device_ctx; struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; @@ -2599,6 +2628,30 @@ static int netvsc_probe(struct hv_device *dev, } list_add(&net_device_ctx->list, &netvsc_dev_list); + + /* When the hv_netvsc driver is unloaded and reloaded, the + * NET_DEVICE_REGISTER for the vf device is replayed before probe + * is complete. This is because register_netdevice_notifier() gets + * registered before vmbus_driver_register() so that callback func + * is set before probe and we don't miss events like NETDEV_POST_INIT + * So, in this section we try to register the matching vf device that + * is present as a netdevice, knowing that its register call is not + * processed in the netvsc_netdev_notifier(as probing is progress and + * get_netvsc_byslot fails). 
+ */ + for_each_netdev(dev_net(net), vf_netdev) { + ret = check_dev_is_matching_vf(vf_netdev); + if (ret != 0) + continue; + + if (net != get_netvsc_byslot(vf_netdev)) + continue; + + netvsc_prepare_bonding(vf_netdev); + netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); + __netvsc_vf_setup(net, vf_netdev); + break; + } rtnl_unlock(); netvsc_devinfo_put(device_info); @@ -2754,28 +2807,17 @@ static int netvsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; - /* Skip our own events */ - if (event_dev->netdev_ops == &device_ops) - return NOTIFY_DONE; - - /* Avoid non-Ethernet type devices */ - if (event_dev->type != ARPHRD_ETHER) - return NOTIFY_DONE; - - /* Avoid Vlan dev with same MAC registering as VF */ - if (is_vlan_dev(event_dev)) - return NOTIFY_DONE; - - /* Avoid Bonding master dev with same MAC registering as VF */ - if (netif_is_bond_master(event_dev)) + ret = check_dev_is_matching_vf(event_dev); + if (ret != 0) return NOTIFY_DONE; switch (event) { case NETDEV_POST_INIT: return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: - return netvsc_register_vf(event_dev); + return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 35e55f198e05..2930141d7dd2 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -259,4 +259,5 @@ static __exit void fake_remove_module(void) module_init(fakelb_init_module); module_exit(fake_remove_module); +MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 60944a4beada..1afc4c47be73 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -237,4 +237,5 @@ static void __exit ipvtap_exit(void) module_exit(ipvtap_exit); MODULE_ALIAS_RTNL_LINK("ipvtap"); MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>"); +MODULE_DESCRIPTION("IP-VLAN based tap driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index b4d3b9cde8bd..92a7a36b93ac 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work) trap_report_dw.work); nsim_dev = nsim_trap_data->nsim_dev; - /* For each running port and enabled packet trap, generate a UDP - * packet with a random 5-tuple and report it. - */ if (!devl_trylock(priv_to_devlink(nsim_dev))) { - schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0); + schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1); return; } + /* For each running port and enabled packet trap, generate a UDP + * packet with a random 5-tuple and report it. 
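[Note] The hv_netvsc change above factors the "can this netdevice be our VF?" checks into check_dev_is_matching_vf(), so the same filter serves both the netdev notifier and the for_each_netdev() walk that now runs during probe. A generic sketch of that notifier-plus-filter pattern follows; the demo_* names are hypothetical and this is not the driver's code.

        #include <linux/netdevice.h>
        #include <linux/notifier.h>
        #include <linux/if_vlan.h>
        #include <linux/if_arp.h>

        static bool demo_is_vf_candidate(struct net_device *dev)
        {
                /* Plain Ethernet only: skip VLAN uppers and bonding masters
                 * that would otherwise match on the shared MAC address. */
                if (dev->type != ARPHRD_ETHER)
                        return false;
                if (is_vlan_dev(dev) || netif_is_bond_master(dev))
                        return false;
                return true;
        }

        static int demo_netdev_event(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
        {
                struct net_device *dev = netdev_notifier_info_to_dev(ptr);

                if (!demo_is_vf_candidate(dev))
                        return NOTIFY_DONE;

                switch (event) {
                case NETDEV_REGISTER:
                        /* pair the VF with its synthetic counterpart here */
                        return NOTIFY_OK;
                default:
                        return NOTIFY_DONE;
                }
        }

        static struct notifier_block demo_nb = {
                .notifier_call = demo_netdev_event,
        };
        /* registered once with register_netdevice_notifier(&demo_nb) */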
+ */ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) { if (!netif_running(nsim_dev_port->ns->netdev)) continue; diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index 69b829e6ab35..7fd3377dbd79 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -131,4 +131,5 @@ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, EXPORT_SYMBOL(__devm_of_mdiobus_register); #endif /* CONFIG_OF_MDIO */ +MODULE_DESCRIPTION("Network MDIO bus devres helpers"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 40ce8abe6999..cc7d1113ece0 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -1437,4 +1437,5 @@ static int __init plip_init (void) module_init(plip_init); module_exit(plip_cleanup_module); +MODULE_DESCRIPTION("PLIP (parallel port) network module"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c index db0dc36d12e3..55954594e157 100644 --- a/drivers/net/ppp/bsd_comp.c +++ b/drivers/net/ppp/bsd_comp.c @@ -1166,5 +1166,6 @@ static void __exit bsdcomp_cleanup(void) module_init(bsdcomp_init); module_exit(bsdcomp_cleanup); +MODULE_DESCRIPTION("PPP BSD-Compress compression module"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS)); diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index 840da924708b..c33c3db3cc08 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -87,6 +87,7 @@ struct asyncppp { static int flag_time = HZ; module_param(flag_time, int, 0); MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)"); +MODULE_DESCRIPTION("PPP async serial channel module"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_PPP); @@ -460,6 +461,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, p)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index e6d48e5c65a3..4d2ff63f2ee2 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -630,6 +630,7 @@ static void __exit deflate_cleanup(void) module_init(deflate_init); module_exit(deflate_cleanup); +MODULE_DESCRIPTION("PPP Deflate compression module"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE)); MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT)); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 0193af2d31c9..3dd52bf28f15 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -3604,6 +3604,7 @@ EXPORT_SYMBOL(ppp_input_error); EXPORT_SYMBOL(ppp_output_wakeup); EXPORT_SYMBOL(ppp_register_compressor); EXPORT_SYMBOL(ppp_unregister_compressor); +MODULE_DESCRIPTION("Generic PPP layer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); MODULE_ALIAS_RTNL_LINK("ppp"); diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 52d05ce4a281..45bf59ac8f57 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -724,5 +724,6 @@ ppp_sync_cleanup(void) module_init(ppp_sync_init); module_exit(ppp_sync_cleanup); +MODULE_DESCRIPTION("PPP synchronous TTY channel module"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_SYNC_PPP); diff --git a/drivers/net/ppp/pppoe.c 
b/drivers/net/ppp/pppoe.c index 8e7238e97d0a..2ea4f4890d23 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, struct sk_buff *skb; int error = 0; - if (sk->sk_state & PPPOX_BOUND) { - error = -EIO; - goto end; - } + if (sk->sk_state & PPPOX_BOUND) + return -EIO; skb = skb_recv_datagram(sk, flags, &error); - if (error < 0) - goto end; + if (!skb) + return error; - if (skb) { - total_len = min_t(size_t, total_len, skb->len); - error = skb_copy_datagram_msg(skb, 0, m, total_len); - if (error == 0) { - consume_skb(skb); - return total_len; - } + total_len = min_t(size_t, total_len, skb->len); + error = skb_copy_datagram_msg(skb, 0, m, total_len); + if (error == 0) { + consume_skb(skb); + return total_len; } kfree_skb(skb); -end: return error; } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 43e0db78d42b..a742cec44e3d 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1803,5 +1803,6 @@ static struct usb_driver ar5523_driver = { module_usb_driver(ar5523_driver); +MODULE_DESCRIPTION("Atheros AR5523 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_FIRMWARE(AR5523_FIRMWARE_FILE); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 41119fb177e3..4e6b4df8562f 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1685,6 +1685,7 @@ static struct platform_driver wcn36xx_driver = { module_platform_driver(wcn36xx_driver); +MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com"); MODULE_FIRMWARE(WLAN_NV_FILE); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c index d55f3271d619..4f0c1e1a8e60 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c @@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void) brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE); } +MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_IMPORT_NS(BRCMFMAC); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 133c5ea6429c..28d6a30cc010 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3779,8 +3779,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req, if (req->channels[i] == chan) break; } - if (i == req->n_channels) - req->channels[req->n_channels++] = chan; + if (i == req->n_channels) { + req->n_channels++; + req->channels[i] = chan; + } for (i = 0; i < req->n_ssids; i++) { if (req->ssids[i].ssid_len == ssid_len && diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c index f82fbbe3ecef..90d06cda03a2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c @@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void) brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE); } 
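[Note] The brcmfmac hunk above looks like a cosmetic reshuffle, but the ordering matters once a flexible array is annotated with __counted_by(): the counter must be raised before the new slot is indexed, or a fortified/UBSAN bounds check sees a store one element past the advertised length. That reading of the motivation is an assumption on my part; the struct below is hypothetical and only illustrates the rule.

        #include <linux/compiler.h>

        struct demo_req {
                int n_channels;
                int channels[] __counted_by(n_channels);
        };

        static void demo_append_channel(struct demo_req *req, int chan)
        {
                int i = req->n_channels;

                /* Grow the advertised length first, then store: writing
                 * channels[req->n_channels] before the increment would be
                 * flagged as out of bounds, since __counted_by() ties the
                 * array length to the current value of n_channels. */
                req->n_channels++;
                req->channels[i] = chan;
        }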
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_IMPORT_NS(BRCMFMAC); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c index 02918d434556..b66135e3cff4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c @@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void) brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE); } +MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_IMPORT_NS(BRCMFMAC); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index b96f30d11644..dcc4810cb324 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -618,7 +618,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -634,7 +634,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -650,7 +650,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -707,7 +707,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -723,7 +723,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -739,7 +739,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -1116,6 +1116,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + read_table: fwrt->ppag_ver = tbl_rev; flags = &wifi_pkg->package.elements[1]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 798731ecbefd..b740c65a7dca 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -537,7 +537,7 @@ enum iwl_fw_dbg_config_cmd_type { }; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */ /* this token disables debug asserts in the firmware */ -#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301 +#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001 /** * struct iwl_fw_dbg_config_cmd - configure FW debug diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index e27774e7ed74..80fda056e46a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -19,7 +19,6 @@ * @fwrt_ptr: pointer to the buffer coming from fwrt * @trans_ptr: 
pointer to struct %iwl_trans_dump_data which contains the * transport's data. - * @trans_len: length of the valid data in trans_ptr * @fwrt_len: length of the valid data in fwrt_ptr */ struct iwl_fw_dump_ptrs { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index ffe2670720c9..abf8001bdac1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.ucode_capa.cmd_versions); kfree(drv->fw.phy_integration_ver); kfree(drv->trans->dbg.pc_data); + drv->trans->dbg.pc_data = NULL; for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 402896988686..2f6774ec37b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -668,7 +668,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = - IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 | @@ -793,7 +792,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = - IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, @@ -1020,8 +1018,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && iftype_data->eht_cap.has_eht) { iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &= - ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | - IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2); iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &= ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 7f13dff04b26..53e26c3c3a9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1600,7 +1600,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - iwl_mvm_vif_dbgfs_add_link(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); ret = 0; goto out; } @@ -1640,7 +1641,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); } - iwl_mvm_vif_dbgfs_add_link(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -3685,6 +3687,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, NL80211_TDLS_SETUP); } + if (ret) + return ret; + for_each_sta_active_link(vif, sta, link_sta, i) link_sta->agg.max_rc_amsdu_len = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 61170173f917..893b69fc841b 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -81,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); } - iwl_mvm_vif_dbgfs_add_link(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -437,6 +438,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, mvmvif->ap_ibss_active = false; } + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { int ret = iwl_mvm_esr_mode_inactive(mvm, vif); @@ -448,9 +452,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, if (vif->type == NL80211_IFTYPE_MONITOR) iwl_mvm_mld_rm_snif_sta(mvm, vif); - iwl_mvm_link_changed(mvm, vif, link_conf, - LINK_CONTEXT_MODIFY_ACTIVE, false); - if (switching_chanctx) return; mvmvif->link[link_id]->phy_ctxt = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 886d00098528..af15d470c69b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -505,6 +505,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + if (WARN_ON_ONCE(!mvm_sta->dup_data)) + return false; + dup_data = &mvm_sta->dup_data[queue]; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 218fdf1ed530..2e653a417d62 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -972,6 +972,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; + mvmvif->time_event_data.link_id = -1; iwl_mvm_p2p_roc_finished(mvm); ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index db986bfc4dc3..461f26d9214e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -520,13 +520,24 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, } } +static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen, + const u8 *addr3_override) +{ + struct ieee80211_hdr *out_hdr = cmd; + + memcpy(cmd, hdr, hdrlen); + if (addr3_override) + memcpy(out_hdr->addr3, addr3_override, ETH_ALEN); +} + /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, - struct ieee80211_sta *sta, u8 sta_id) + struct ieee80211_sta *sta, u8 sta_id, + const u8 *addr3_override) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_tx_cmd 
*dev_cmd; @@ -584,7 +595,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ - memcpy(cmd->hdr, hdr, hdrlen); + iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); cmd->flags = cpu_to_le16(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); @@ -599,7 +610,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ - memcpy(cmd->hdr, hdr, hdrlen); + iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); cmd->flags = cpu_to_le32(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); @@ -617,7 +628,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); + iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override); out: return dev_cmd; @@ -820,7 +831,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id, + NULL); if (!dev_cmd) return -1; @@ -1140,7 +1152,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, */ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + const u8 *addr3_override) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; @@ -1172,7 +1185,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, - sta, mvmsta->deflink.sta_id); + sta, mvmsta->deflink.sta_id, + addr3_override); if (!dev_cmd) goto drop; @@ -1294,9 +1308,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; + struct ieee80211_vif *vif; unsigned int payload_len; int ret; struct sk_buff *orig_skb = skb; + const u8 *addr3; if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -1307,26 +1323,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, memcpy(&info, skb->cb, sizeof(info)); if (!skb_is_gso(skb)) - return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) - return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL); __skb_queue_head_init(&mpdus_skbs); + vif = info.control.vif; + if (!vif) + return -1; + ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; WARN_ON(skb_queue_empty(&mpdus_skbs)); + /* + * As described in IEEE sta 802.11-2020, table 9-30 (Address + * field contents), A-MSDU address 3 should contain the BSSID + * address. + * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it + * in the command header. We need to preserve the original + * address 3 in the skb header to correctly create all the + * A-MSDU subframe headers from it. 
+ */ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + addr3 = vif->cfg.ap_addr; + break; + case NL80211_IFTYPE_AP: + addr3 = vif->addr; + break; + default: + addr3 = NULL; + break; + } + while (!skb_queue_empty(&mpdus_skbs)) { + struct ieee80211_hdr *hdr; + bool amsdu; + skb = __skb_dequeue(&mpdus_skbs); + hdr = (void *)skb->data; + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); - ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); + ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta, + amsdu ? addr3 : NULL); if (ret) { /* Free skbs created as part of TSO logic that have not yet been dequeued */ __skb_queue_purge(&mpdus_skbs); diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index ce0179b8ab36..0073b5e0f9c9 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = { module_spi_driver(p54spi_driver); +MODULE_DESCRIPTION("Prism54 SPI wireless driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); MODULE_ALIAS("spi:cx3110x"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 89d738deea62..e2146d30e553 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -728,6 +728,7 @@ const struct ieee80211_ops mt7603_ops = { .set_sar_specs = mt7603_set_sar_specs, }; +MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); static int __init mt7603_init(void) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index dab16b5fc386..0971c164b57e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = { }; EXPORT_SYMBOL_GPL(mt7615_ops); +MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index ac036a072439..87a956ea3ad7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -270,4 +270,5 @@ static void __exit mt7615_exit(void) module_init(mt7615_init); module_exit(mt7615_exit); +MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index 67cedd2555f9..9692890ba51b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index 04963b9f7498..df737e1ff27b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi 
<lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 0052d103e276..820b39590027 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 96494ba2fdf7..3a20ba0d2492 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -3160,4 +3160,5 @@ exit: EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index c3a392a1a659..bcd24c9072ec 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev) return 0; } +MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 9277ff38b7a2..293e66fa83d5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -302,6 +302,7 @@ static const struct pci_device_id mt76x0e_device_table[] = { MODULE_DEVICE_TABLE(pci, mt76x0e_device_table); MODULE_FIRMWARE(MT7610E_FIRMWARE); MODULE_FIRMWARE(MT7650E_FIRMWARE); +MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); static struct pci_driver mt76x0e_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0422c332354a..dd042949cf82 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -336,6 +336,7 @@ err: MODULE_DEVICE_TABLE(usb, mt76x0_device_table); MODULE_FIRMWARE(MT7610E_FIRMWARE); MODULE_FIRMWARE(MT7610U_FIRMWARE); +MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver"); MODULE_LICENSE("GPL"); static struct usb_driver mt76x0_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index 02da543dfc5c..b2cc44914294 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev) EXPORT_SYMBOL_GPL(mt76x02u_init_mcu); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 8a0e8124b894..8020446be37b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -696,4 +696,5 
@@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list); +MODULE_DESCRIPTION("MediaTek MT76x02 helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 8c01855885ce..1fe5f5a02f93 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76x2_eeprom_init); +MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index df85ebc6e1df..30959746e924 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev) MODULE_DEVICE_TABLE(pci, mt76x2e_device_table); MODULE_FIRMWARE(MT7662_FIRMWARE); MODULE_FIRMWARE(MT7662_ROM_PATCH); +MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); static struct pci_driver mt76pci_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 55068f3252ef..ca78e14251c2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -147,4 +147,5 @@ static struct usb_driver mt76x2u_driver = { module_usb_driver(mt76x2u_driver); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index aff4f21e843d..3039f53e2245 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -958,4 +958,5 @@ static void __exit mt7915_exit(void) module_init(mt7915_init); module_exit(mt7915_exit); +MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 0645417e0582..0d5adc5ddae3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -1418,5 +1418,6 @@ const struct ieee80211_ops mt7921_ops = { }; EXPORT_SYMBOL_GPL(mt7921_ops); +MODULE_DESCRIPTION("MediaTek MT7921 core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 57903c6e4f11..dde26f327478 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -544,4 +544,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM); MODULE_FIRMWARE(MT7922_ROM_PATCH); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index 7591e54d2897..a9ce1e746b95 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -323,5 +323,6 @@ static 
struct sdio_driver mt7921s_driver = { .drv.pm = pm_sleep_ptr(&mt7921s_pm_ops), }; module_sdio_driver(mt7921s_driver); +MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index e5258c74fc07..8b7c03c47598 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = { }; module_usb_driver(mt7921u_driver); +MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 8f1075da4903..125a1be3cb64 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -1450,4 +1450,5 @@ const struct ieee80211_ops mt7925_ops = { EXPORT_SYMBOL_GPL(mt7925_ops); MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek MT7925 core driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 734f31ee40d3..1fd99a856541 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -583,4 +583,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM); MODULE_FIRMWARE(MT7925_ROM_PATCH); MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c index 9b885c5b3ed5..1e0f094fc905 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = { module_usb_driver(mt7925u_driver); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c index 502be22dbe36..c42101aa9e45 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -862,5 +862,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt792x_load_firmware); +MODULE_DESCRIPTION("MediaTek MT792x core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c index 2dd283caed36..589a3efb9f8c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c @@ -314,5 +314,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf) } EXPORT_SYMBOL_GPL(mt792xu_disconnect); +MODULE_DESCRIPTION("MediaTek MT792x USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 3c729b563edc..699be57309c2 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -4477,7 +4477,8 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) skb_put_data(skb, &req, sizeof(req)); /* cck and ofdm */ - skb_put_data(skb, &la.cck, sizeof(la.cck) + sizeof(la.ofdm)); + skb_put_data(skb, &la.cck, sizeof(la.cck)); + skb_put_data(skb, &la.ofdm, sizeof(la.ofdm)); /* ht20 */ skb_put_data(skb, &la.mcs[0], 8); /* ht40 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index c50d89a445e9..9f2abfa273c9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -650,4 +650,5 @@ static void __exit mt7996_exit(void) module_init(mt7996_init); module_exit(mt7996_exit); +MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index c52d550f0c32..3e88798df017 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 1584665fe3cb..5a0bcb5071bd 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1128,4 +1128,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf) EXPORT_SYMBOL_GPL(mt76u_init); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_DESCRIPTION("MediaTek MT76x USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index fc76c66ff1a5..d6c01a2dd198 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr) } EXPORT_SYMBOL_GPL(__mt76_worker_fn); +MODULE_DESCRIPTION("MediaTek MT76x helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 91d71e0f7ef2..81e8f25863f5 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -1018,5 +1018,6 @@ unregister_netdev: return ERR_PTR(ret); } +MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER)); diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 0d13e3e46e98..d6d394693090 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -984,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = { module_driver(wilc_sdio_driver, sdio_register_driver, sdio_unregister_driver); +MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 77b4cdff73c3..1d8b241ce43c 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -273,6 +273,7 @@ static struct spi_driver wilc_spi_driver = { 
.remove = wilc_bus_remove, }; module_spi_driver(wilc_spi_driver); +MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver"); MODULE_LICENSE("GPL"); static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len) diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c index 301bd0043a43..4e5b351f80f0 100644 --- a/drivers/net/wireless/ti/wl1251/sdio.c +++ b/drivers/net/wireless/ti/wl1251/sdio.c @@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void) module_init(wl1251_sdio_init); module_exit(wl1251_sdio_exit); +MODULE_DESCRIPTION("TI WL1251 SDIO helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 29292f06bd3d..1936bb3af54a 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = { module_spi_driver(wl1251_spi_driver); +MODULE_DESCRIPTION("TI WL1251 SPI helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); MODULE_ALIAS("spi:wl1251"); diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index de045fe4ca1e..b26d42b4e3cc 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0); MODULE_PARM_DESC(tcxo, "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6"); +MODULE_DESCRIPTION("TI WL12xx wireless driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE); diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 20d9181b3410..2ccac1cdec01 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400); MODULE_PARM_DESC(num_rx_desc_param, "Number of Rx descriptors: u8 (default is 32)"); +MODULE_DESCRIPTION("TI WiLink 8 wireless driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_FIRMWARE(WL18XX_FW_NAME); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index fb9ed97774c7..5736acb4d206 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); module_param(no_recovery, int, 0600); MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. 
FW will remain stuck."); +MODULE_DESCRIPTION("TI WLAN core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index f0686635db46..eb5482ed76ae 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -447,6 +447,7 @@ module_sdio_driver(wl1271_sdio_driver); module_param(dump, bool, 0600); MODULE_PARM_DESC(dump, "Enable sdio read/write dumps."); +MODULE_DESCRIPTION("TI WLAN SDIO helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 7d9a139db59e..0aa2b2f3c5c9 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = { }; module_spi_driver(wl1271_spi_driver); +MODULE_DESCRIPTION("TI WLAN SPI helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index fab361a250d6..ef76850d9bcd 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1778,5 +1778,6 @@ static void __exit netback_fini(void) } module_exit(netback_fini); +MODULE_DESCRIPTION("Xen backend network device module"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vif"); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0d124a8ca9c3..0a96362912ce 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -713,7 +713,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd) if (req->q->queuedata) { struct nvme_ns *ns = req->q->disk->private_data; - logging_enabled = ns->passthru_err_log_enabled; + logging_enabled = ns->head->passthru_err_log_enabled; req->timeout = NVME_IO_TIMEOUT; } else { /* no queuedata implies admin queue */ logging_enabled = nr->ctrl->passthru_err_log_enabled; @@ -1153,6 +1153,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } else { effects = le32_to_cpu(ctrl->effects->acs[opcode]); + + /* Ignore execution restrictions if any relaxation bits are set */ + if (effects & NVME_CMD_EFFECTS_CSER_MASK) + effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } return effects; @@ -3696,7 +3700,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) ns->disk = disk; ns->queue = disk->queue; - ns->passthru_err_log_enabled = false; if (ctrl->opts && ctrl->opts->data_digest) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); @@ -3762,8 +3765,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) /* * Set ns->disk->device->driver_data to ns so we can access - * ns->logging_enabled in nvme_passthru_err_log_enabled_store() and - * nvme_passthru_err_log_enabled_show(). + * ns->head->passthru_err_log_enabled in + * nvme_io_passthru_err_log_enabled_[store | show](). 
*/ dev_set_drvdata(disk_to_dev(ns->disk), ns); @@ -4191,6 +4194,7 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) { struct nvme_fw_slot_info_log *log; + u8 next_fw_slot, cur_fw_slot; log = kmalloc(sizeof(*log), GFP_KERNEL); if (!log) @@ -4202,13 +4206,15 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) goto out_free_log; } - if (log->afi & 0x70 || !(log->afi & 0x7)) { + cur_fw_slot = log->afi & 0x7; + next_fw_slot = (log->afi & 0x70) >> 4; + if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { dev_info(ctrl->device, "Firmware is activated after next Controller Level Reset\n"); goto out_free_log; } - memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1], + memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], sizeof(ctrl->subsys->firmware_rev)); out_free_log: diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 3499acbf6a82..495c171daead 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -534,6 +534,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); + goto out_free_data; } result = le32_to_cpu(res.u32); if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 18f5c1be5d67..3dfd5ae99ae0 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -228,7 +228,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) length = (io.nblocks + 1) << ns->head->lba_shift; if ((io.control & NVME_RW_PRINFO_PRACT) && - ns->head->ms == sizeof(struct t10_pi_tuple)) { + (ns->head->ms == ns->head->pi_size)) { /* * Protection information is stripped/inserted by the * controller. diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 3897334e3950..7b87763e2f8a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -455,6 +455,7 @@ struct nvme_ns_head { struct list_head entry; struct kref ref; bool shared; + bool passthru_err_log_enabled; int instance; struct nvme_effects_log *effects; u64 nuse; @@ -523,7 +524,6 @@ struct nvme_ns { struct device cdev_device; struct nvme_fault_inject fault_inject; - bool passthru_err_log_enabled; }; /* NVMe ns supports metadata actions by the controller (generate/strip) */ diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index d099218e494a..f2832f70e7e0 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -48,8 +48,8 @@ static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - int err; bool passthru_err_log_enabled; + int err; err = kstrtobool(buf, &passthru_err_log_enabled); if (err) @@ -60,25 +60,34 @@ static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev, return count; } +static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + + if (nvme_disk_is_ns_head(disk)) + return disk->private_data; + return nvme_get_ns_from_dev(dev)->head; +} + static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct nvme_ns *n = dev_get_drvdata(dev); + struct nvme_ns_head *head = dev_to_ns_head(dev); - return sysfs_emit(buf, n->passthru_err_log_enabled ? 
"on\n" : "off\n"); + return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n"); } static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct nvme_ns *ns = dev_get_drvdata(dev); - int err; + struct nvme_ns_head *head = dev_to_ns_head(dev); bool passthru_err_log_enabled; + int err; err = kstrtobool(buf, &passthru_err_log_enabled); if (err) return -EINVAL; - ns->passthru_err_log_enabled = passthru_err_log_enabled; + head->passthru_err_log_enabled = passthru_err_log_enabled; return count; } @@ -91,15 +100,6 @@ static struct device_attribute dev_attr_io_passthru_err_log_enabled = \ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \ nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store); -static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) -{ - struct gendisk *disk = dev_to_disk(dev); - - if (nvme_disk_is_ns_head(disk)) - return disk->private_data; - return nvme_get_ns_from_dev(dev)->head; -} - static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index d8da840a1c0e..9964ffe347d2 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -209,7 +209,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; - u16 status = 0; + u16 status; int ret; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) @@ -290,7 +290,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl; u16 qid = le16_to_cpu(c->qid); - u16 status = 0; + u16 status; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 980123fb4dde..eb357ac2e54a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -460,8 +460,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem) list_for_each_entry(entry, &nvmem->cells, node) { sysfs_bin_attr_init(&attrs[i]); attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL, - "%s@%x", entry->name, - entry->offset); + "%s@%x,%x", entry->name, + entry->offset, + entry->bit_offset); attrs[i].attr.mode = 0444; attrs[i].size = entry->bytes; attrs[i].read = &nvmem_cell_attr_read; diff --git a/drivers/of/property.c b/drivers/of/property.c index 641a40cf5cf3..b71267c6667c 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -763,7 +763,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node) /* Walk 3 levels up only if there is 'ports' node. 
*/ for (depth = 3; depth && node; depth--) { node = of_get_next_parent(node); - if (depth == 2 && !of_node_name_eq(node, "ports")) + if (depth == 2 && !of_node_name_eq(node, "ports") && + !of_node_name_eq(node, "in-ports") && + !of_node_name_eq(node, "out-ports")) break; } return node; @@ -1063,36 +1065,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, return of_device_get_match_data(dev); } -static struct device_node *of_get_compat_node(struct device_node *np) -{ - of_node_get(np); - - while (np) { - if (!of_device_is_available(np)) { - of_node_put(np); - np = NULL; - } - - if (of_property_present(np, "compatible")) - break; - - np = of_get_next_parent(np); - } - - return np; -} - -static struct device_node *of_get_compat_node_parent(struct device_node *np) -{ - struct device_node *parent, *node; - - parent = of_get_parent(np); - node = of_get_compat_node(parent); - of_node_put(parent); - - return node; -} - static void of_link_to_phandle(struct device_node *con_np, struct device_node *sup_np) { @@ -1222,10 +1194,10 @@ static struct device_node *parse_##fname(struct device_node *np, \ * parse_prop.prop_name: Name of property holding a phandle value * parse_prop.index: For properties holding a list of phandles, this is the * index into the list + * @get_con_dev: If the consumer node containing the property is never converted + * to a struct device, implement this ops so fw_devlink can use it + * to find the true consumer. * @optional: Describes whether a supplier is mandatory or not - * @node_not_dev: The consumer node containing the property is never converted - * to a struct device. Instead, parse ancestor nodes for the - * compatible property to find a node corresponding to a device. * * Returns: * parse_prop() return values are @@ -1236,15 +1208,15 @@ static struct device_node *parse_##fname(struct device_node *np, \ struct supplier_bindings { struct device_node *(*parse_prop)(struct device_node *np, const char *prop_name, int index); + struct device_node *(*get_con_dev)(struct device_node *np); bool optional; - bool node_not_dev; }; DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells") DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells") DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells") -DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells") +DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells") DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL) DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells") DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells") @@ -1262,7 +1234,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL) DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL) DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) -DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL) DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") DEFINE_SIMPLE_PROP(leds, "leds", NULL) @@ -1328,6 +1299,17 @@ static struct device_node *parse_interrupts(struct device_node *np, return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np; } +static struct device_node *parse_remote_endpoint(struct device_node *np, + const char *prop_name, + int index) +{ + /* Return NULL for index > 0 to signify end of remote-endpoints. 
*/ + if (!index || strcmp(prop_name, "remote-endpoint")) + return NULL; + + return of_graph_get_remote_port_parent(np); +} + static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_clocks, }, { .parse_prop = parse_interconnects, }, @@ -1352,7 +1334,10 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_pinctrl6, }, { .parse_prop = parse_pinctrl7, }, { .parse_prop = parse_pinctrl8, }, - { .parse_prop = parse_remote_endpoint, .node_not_dev = true, }, + { + .parse_prop = parse_remote_endpoint, + .get_con_dev = of_graph_get_port_parent, + }, { .parse_prop = parse_pwms, }, { .parse_prop = parse_resets, }, { .parse_prop = parse_leds, }, @@ -1403,8 +1388,8 @@ static int of_link_property(struct device_node *con_np, const char *prop_name) while ((phandle = s->parse_prop(con_np, prop_name, i))) { struct device_node *con_dev_np; - con_dev_np = s->node_not_dev - ? of_get_compat_node_parent(con_np) + con_dev_np = s->get_con_dev + ? s->get_con_dev(con_np) : of_node_get(con_np); matched = true; i++; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index cfd60e35a899..d7593bde2d02 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -50,6 +50,12 @@ static struct unittest_results { failed; \ }) +#ifdef CONFIG_OF_KOBJ +#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref) +#else +#define OF_KREF_READ(NODE) 1 +#endif + /* * Expected message may have a message level other than KERN_INFO. * Print the expected message only if the current loglevel will allow @@ -570,7 +576,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void) pr_err("missing testcase data\n"); return; } - prefs[i] = kref_read(&p[i]->kobj.kref); + prefs[i] = OF_KREF_READ(p[i]); } rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); @@ -693,9 +699,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void) unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); for (i = 0; i < ARRAY_SIZE(p); ++i) { - unittest(prefs[i] == kref_read(&p[i]->kobj.kref), + unittest(prefs[i] == OF_KREF_READ(p[i]), "provider%d: expected:%d got:%d\n", - i, prefs[i], kref_read(&p[i]->kobj.kref)); + i, prefs[i], OF_KREF_READ(p[i])); of_node_put(p[i]); } } diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 5befed2dc02b..9a437cfce073 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -6,6 +6,7 @@ * Author: Kishon Vijay Abraham I <kishon@ti.com> */ +#include <linux/align.h> #include <linux/bitfield.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -482,9 +483,10 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep_func->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); } - aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); - msg_addr = ((u64)msg_addr_upper) << 32 | - (msg_addr_lower & ~aligned_offset); + msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; + + aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) @@ -551,7 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, } aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr &= ~aligned_offset; + msg_addr = ALIGN_DOWN(msg_addr, 
epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 9ab9b1008d8b..c3585229c12a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2522,29 +2522,36 @@ static void pci_pme_list_scan(struct work_struct *work) if (pdev->pme_poll) { struct pci_dev *bridge = pdev->bus->self; struct device *dev = &pdev->dev; - int pm_status; + struct device *bdev = bridge ? &bridge->dev : NULL; + int bref = 0; /* - * If bridge is in low power state, the - * configuration space of subordinate devices - * may be not accessible + * If we have a bridge, it should be in an active/D0 + * state or the configuration space of subordinate + * devices may not be accessible or stable over the + * course of the call. */ - if (bridge && bridge->current_state != PCI_D0) - continue; + if (bdev) { + bref = pm_runtime_get_if_active(bdev, true); + if (!bref) + continue; + + if (bridge->current_state != PCI_D0) + goto put_bridge; + } /* - * If the device is in a low power state it - * should not be polled either. + * The device itself should be suspended but config + * space must be accessible, therefore it cannot be in + * D3cold. */ - pm_status = pm_runtime_get_if_active(dev, true); - if (!pm_status) - continue; - - if (pdev->current_state != PCI_D3cold) + if (pm_runtime_suspended(dev) && + pdev->current_state != PCI_D3cold) pci_pme_wakeup(pdev, NULL); - if (pm_status > 0) - pm_runtime_put(dev); +put_bridge: + if (bref > 0) + pm_runtime_put(bdev); } else { list_del(&pme_dev->list); kfree(pme_dev); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index c584165b13ba..7e3aa7e2345f 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2305,6 +2305,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); continue; } + /* + * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus + * child count larger than the number of valid child pointers. + * A child offset of 0 can only occur on CMN-600; otherwise it + * would imply the root node being its own grandchild, which + * we can safely dismiss in general. 
+ */ + if (reg == 0 && cmn->part != PART_CMN600) { + dev_dbg(cmn->dev, "bogus child pointer?\n"); + continue; + } arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c index 365d964b0f6a..bc0d414a6aff 100644 --- a/drivers/perf/cxl_pmu.c +++ b/drivers/perf/cxl_pmu.c @@ -419,7 +419,7 @@ static struct attribute *cxl_pmu_event_attrs[] = { CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)), CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)), CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)), - CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)), + CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)), /* CXL rev 3.0 Table 3-46 S2M DRS opcodes */ CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)), CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)), diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 03ecb3d1aaf6..49f89b70dcec 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev) } ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler, - IRQF_SHARED, KBUILD_MODNAME, gpio_dev); + IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev); if (ret) goto out2; diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index a1f6cba3ae6c..18e232b5ed53 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -1109,7 +1109,7 @@ static int __init genpd_power_off_unused(void) return 0; } -late_initcall(genpd_power_off_unused); +late_initcall_sync(genpd_power_off_unused); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c index e26dc17d07ad..e274e3315fe7 100644 --- a/drivers/pmdomain/mediatek/mtk-pm-domains.c +++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c @@ -561,6 +561,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren goto err_put_node; } + /* recursive call to add all subdomains */ + ret = scpsys_add_subdomain(scpsys, child); + if (ret) + goto err_put_node; + ret = pm_genpd_add_subdomain(parent_pd, child_pd); if (ret) { dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n", @@ -570,11 +575,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name, child_pd->name); } - - /* recursive call to add all subdomains */ - ret = scpsys_add_subdomain(scpsys, child); - if (ret) - goto err_put_node; } return 0; @@ -588,9 +588,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd) { int ret; - if (scpsys_domain_is_on(pd)) - scpsys_power_off(&pd->genpd); - /* * We're in the error cleanup already, so we only complain, * but won't emit another error on top of the original one. 
@@ -600,6 +597,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd) dev_err(pd->scpsys->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n", pd->genpd.name, ret); + if (scpsys_domain_is_on(pd)) + scpsys_power_off(&pd->genpd); clk_bulk_put(pd->num_clks, pd->clks); clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks); diff --git a/drivers/pmdomain/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c index 39ca84a67daa..621e411fc999 100644 --- a/drivers/pmdomain/renesas/r8a77980-sysc.c +++ b/drivers/pmdomain/renesas/r8a77980-sysc.c @@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = { PD_CPU_NOCR }, { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU, PD_CPU_NOCR }, - { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON }, + { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON, + PD_CPU_NOCR }, { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON }, { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR }, { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR }, diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b92a32b4b114..04c64ce0a1ca 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) if (!recover) { hash_del(&addr->hnode); kfree(addr); - continue; + } else { + /* prepare for recovery */ + addr->disp_flag = QETH_DISP_ADDR_ADD; } - addr->disp_flag = QETH_DISP_ADDR_ADD; } mutex_unlock(&card->ip_lock); @@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card) if (addr->disp_flag == QETH_DISP_ADDR_ADD) { rc = qeth_l3_register_addr_entry(card, addr); - if (!rc) { + if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { + /* keep it in the records */ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; } else { + /* bad address */ hash_del(&addr->hnode); kfree(addr); } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 19eee108db02..5c8d1ba3f8f3 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) { struct fcoe_fcf *sel; struct fcoe_fcf *fcf; - unsigned long flags; mutex_lock(&fip->ctlr_mutex); - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); kfree_skb(fip->flogi_req); fip->flogi_req = NULL; list_for_each_entry(fcf, &fip->fcfs, list) fcf->flogi_sent = 0; - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); sel = fip->sel_fcf; if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) @@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, { struct fc_frame *fp; struct fc_frame_header *fh; - unsigned long flags; u16 old_xid; u8 op; u8 mac[ETH_ALEN]; @@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, op = FIP_DT_FLOGI; if (fip->mode == FIP_MODE_VN2VN) break; - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); kfree_skb(fip->flogi_req); fip->flogi_req = skb; fip->flogi_req_send = 1; - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); schedule_work(&fip->timer_work); return -EINPROGRESS; case ELS_FDISC: @@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; - unsigned long flags; int error; 
mutex_lock(&fip->ctlr_mutex); - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); fcf = fcoe_ctlr_select(fip); if (!fcf || fcf->flogi_sent) { @@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) fcoe_ctlr_solicit(fip, NULL); error = fcoe_ctlr_flogi_send_locked(fip); } - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); mutex_unlock(&fip->ctlr_mutex); return error; } @@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; - unsigned long flags; - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); fcf = fip->sel_fcf; if (!fcf || !fip->flogi_req_send) goto unlock; @@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) } else /* XXX */ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); unlock: - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); } /** diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 2074937c05bc..ce73f08ee889 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -305,6 +305,7 @@ struct fnic { unsigned int copy_wq_base; struct work_struct link_work; struct work_struct frame_work; + struct work_struct flush_work; struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; @@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); -void fnic_flush_tx(struct fnic *); +void fnic_flush_tx(struct work_struct *work); void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); void fnic_update_mac(struct fc_lport *, u8 *new); diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 5e312a55cc7d..a08293b2ad9f 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) /** * fnic_flush_tx() - send queued frames. - * @fnic: fnic device + * @work: pointer to work element * * Send frames that were waiting to go out in FC or Ethernet mode. * Whenever changing modes we purge queued frames, so these frames should @@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) * * Called without fnic_lock held. 
*/ -void fnic_flush_tx(struct fnic *fnic) +void fnic_flush_tx(struct work_struct *work) { + struct fnic *fnic = container_of(work, struct fnic, flush_work); struct sk_buff *skb; struct fc_frame *fp; diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5ed1d897311a..29eead383eb9 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); INIT_WORK(&fnic->event_work, fnic_handle_event); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); skb_queue_head_init(&fnic->fip_frame_queue); INIT_LIST_HEAD(&fnic->evlist); INIT_LIST_HEAD(&fnic->vlans); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8d7fc5284293..fc4cee91b175 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -680,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->flush_work); reset_cmpl_handler_end: fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); @@ -736,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->flush_work); queue_work(fnic_event_queue, &fnic->frame_work); } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d26941b131fd..bf879d81846b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1918,7 +1918,7 @@ out: * * Returns the number of SGEs added to the SGL. **/ -static int +static uint32_t lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datasegcnt, struct lpfc_io_buf *lpfc_cmd) @@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct scatterlist *sgde = NULL; /* s/g data entry */ struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; - int i = 0, num_sge = 0, status; - uint32_t reftag; + int i = 0, status; + uint32_t reftag, num_sge = 0; uint8_t txop, rxop; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; @@ -2099,7 +2099,7 @@ out: * * Returns the number of SGEs added to the SGL. 
**/ -static int +static uint32_t lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datacnt, int protcnt, struct lpfc_io_buf *lpfc_cmd) @@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint32_t rc; #endif uint32_t checking = 1; - uint32_t dma_offset = 0; - int num_sge = 0, j = 2; + uint32_t dma_offset = 0, num_sge = 0; + int j = 2; struct sli4_hybrid_sgl *sgl_xtra = NULL; sgpe = scsi_prot_sglist(sc); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 4f455884fdc4..612489afe8d2 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head) { struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); struct Scsi_Host *shost = scmd->device->host; + unsigned int busy = scsi_host_busy(shost); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); shost->host_failed++; - scsi_eh_wakeup(shost, scsi_host_busy(shost)); + scsi_eh_wakeup(shost, busy); spin_unlock_irqrestore(shost->host_lock, flags); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1fb80eae9a63..df5ac03d5d6c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) rcu_read_lock(); __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); if (unlikely(scsi_host_in_recovery(shost))) { + unsigned int busy = scsi_host_busy(shost); + spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) - scsi_eh_wakeup(shost, scsi_host_busy(shost)); + scsi_eh_wakeup(shost, busy); spin_unlock_irqrestore(shost->host_lock, flags); } rcu_read_unlock(); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 546cdce525fc..833a1bb7a914 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -2,6 +2,7 @@ // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. 
// Copyright (C) 2008 Juergen Beisert +#include <linux/bits.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> @@ -660,15 +661,15 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, << MX51_ECSPI_CTRL_BL_OFFSET; else { if (spi_imx->usedma) { - ctrl |= (spi_imx->bits_per_word * - spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1) + ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET; } else { if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) - ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1) + ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1) << MX51_ECSPI_CTRL_BL_OFFSET; else - ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1) + ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word, + BITS_PER_BYTE) * spi_imx->bits_per_word << MX51_ECSPI_CTRL_BL_OFFSET; } } diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 07d20ca1164c..4337ca51d7aa 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -85,6 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info }, { }, }; MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 1bf080339b5a..88cbe4f00cc3 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -39,6 +39,7 @@ #include <linux/spi/spi.h> #include <linux/spi/mxs-spi.h> #include <trace/events/spi.h> +#include <linux/dma/mxs-dma.h> #define DRIVER_NAME "mxs-spi" @@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, desc = dmaengine_prep_slave_sg(ssp->dmach, &dma_xfer[sg_count].sg, 1, (flags & TXRX_WRITE) ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(ssp->dev, diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index a0c9fea908f5..ddf1c684bcc7 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -53,8 +53,6 @@ /* per-register bitmasks: */ #define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17) -#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY BIT(0) -#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL BIT(2) #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) @@ -293,7 +291,7 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr) } static void omap2_mcspi_set_fifo(const struct spi_device *spi, - struct spi_transfer *t, int enable, int dma_enabled) + struct spi_transfer *t, int enable) { struct spi_controller *ctlr = spi->controller; struct omap2_mcspi_cs *cs = spi->controller_state; @@ -314,28 +312,20 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2; else max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; - if (dma_enabled) - wcnt = t->len / bytes_per_word; - else - wcnt = 0; + + wcnt = t->len / bytes_per_word; if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) goto disable_fifo; xferlevel = wcnt << 16; if (t->rx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFER; - if (dma_enabled) - xferlevel |= (bytes_per_word - 1) << 8; - else - xferlevel |= (max_fifo_depth - 1) << 8; + xferlevel |= (bytes_per_word - 1) << 8; } if (t->tx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFET; - if (dma_enabled) - xferlevel |= bytes_per_word - 1; - else - xferlevel |= (max_fifo_depth - 1); + xferlevel |= bytes_per_word - 1; } mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel); @@ -892,113 +882,6 @@ out: return count - c; } -static unsigned -omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - struct omap2_mcspi *mcspi; - unsigned int count, c; - unsigned int iter, cwc; - int last_request; - void __iomem *base = cs->base; - void __iomem *tx_reg; - void __iomem *rx_reg; - void __iomem *chstat_reg; - void __iomem *irqstat_reg; - int word_len, bytes_per_word; - u8 *rx; - const u8 *tx; - - mcspi = spi_controller_get_devdata(spi->controller); - count = xfer->len; - c = count; - word_len = cs->word_len; - bytes_per_word = mcspi_bytes_per_word(word_len); - - /* - * We store the pre-calculated register addresses on stack to speed - * up the transfer loop. 
- */ - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; - chstat_reg = base + OMAP2_MCSPI_CHSTAT0; - irqstat_reg = base + OMAP2_MCSPI_IRQSTATUS; - - if (c < (word_len >> 3)) - return 0; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - - do { - /* calculate number of words in current iteration */ - cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word, - c / bytes_per_word); - last_request = cwc != (mcspi->fifo_depth / bytes_per_word); - if (tx) { - if (mcspi_wait_for_reg_bit(irqstat_reg, - OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) { - dev_err(&spi->dev, "TX Empty timed out\n"); - goto out; - } - writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg); - - for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) { - if (bytes_per_word == 1) - writel_relaxed(*tx, tx_reg); - else if (bytes_per_word == 2) - writel_relaxed(*((u16 *)tx), tx_reg); - else if (bytes_per_word == 4) - writel_relaxed(*((u32 *)tx), tx_reg); - } - } - - if (rx) { - if (!last_request && - mcspi_wait_for_reg_bit(irqstat_reg, - OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) { - dev_err(&spi->dev, "RX_FULL timed out\n"); - goto out; - } - writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg); - - for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) { - if (last_request && - mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, "RXS timed out\n"); - goto out; - } - if (bytes_per_word == 1) - *rx = readl_relaxed(rx_reg); - else if (bytes_per_word == 2) - *((u16 *)rx) = readl_relaxed(rx_reg); - else if (bytes_per_word == 4) - *((u32 *)rx) = readl_relaxed(rx_reg); - } - } - - if (last_request) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_EOT) < 0) { - dev_err(&spi->dev, "EOT timed out\n"); - goto out; - } - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXFFE) < 0) { - dev_err(&spi->dev, "TXFFE timed out\n"); - goto out; - } - omap2_mcspi_set_enable(spi, 0); - } - c -= cwc * bytes_per_word; - } while (c >= bytes_per_word); - -out: - omap2_mcspi_set_enable(spi, 1); - return count - c; -} - static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz) { u32 div; @@ -1323,9 +1206,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && ctlr->cur_msg_mapped && ctlr->can_dma(ctlr, spi, t)) - omap2_mcspi_set_fifo(spi, t, 1, 1); - else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH) - omap2_mcspi_set_fifo(spi, t, 1, 0); + omap2_mcspi_set_fifo(spi, t, 1); omap2_mcspi_set_enable(spi, 1); @@ -1338,8 +1219,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, ctlr->cur_msg_mapped && ctlr->can_dma(ctlr, spi, t)) count = omap2_mcspi_txrx_dma(spi, t); - else if (mcspi->fifo_depth > 0) - count = omap2_mcspi_txrx_piofifo(spi, t); else count = omap2_mcspi_txrx_pio(spi, t); @@ -1352,7 +1231,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, omap2_mcspi_set_enable(spi, 0); if (mcspi->fifo_depth > 0) - omap2_mcspi_set_fifo(spi, t, 0, 0); + omap2_mcspi_set_fifo(spi, t, 0); out: /* Restore defaults if they were overriden */ @@ -1375,7 +1254,7 @@ out: omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH)); if (mcspi->fifo_depth > 0 && t) - omap2_mcspi_set_fifo(spi, t, 0, 0); + omap2_mcspi_set_fifo(spi, t, 0); return status; } diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c index 03aab661be9d..942c3117ab3a 100644 --- a/drivers/spi/spi-ppc4xx.c +++ b/drivers/spi/spi-ppc4xx.c @@ -25,11 +25,13 @@ #include <linux/slab.h> #include <linux/errno.h> #include 
<linux/wait.h> +#include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -166,10 +168,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) int scr; u8 cdm = 0; u32 speed; - u8 bits_per_word; /* Start with the generic configuration for this device. */ - bits_per_word = spi->bits_per_word; speed = spi->max_speed_hz; /* @@ -177,9 +177,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) * the transfer to overwrite the generic configuration with zeros. */ if (t) { - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - if (t->speed_hz) speed = min(t->speed_hz, spi->max_speed_hz); } @@ -362,22 +359,22 @@ static int spi_ppc4xx_of_probe(struct platform_device *op) /* Setup the state for the bitbang driver */ bbp = &hw->bitbang; - bbp->master = hw->host; + bbp->ctlr = hw->host; bbp->setup_transfer = spi_ppc4xx_setupxfer; bbp->txrx_bufs = spi_ppc4xx_txrx; bbp->use_dma = 0; - bbp->master->setup = spi_ppc4xx_setup; - bbp->master->cleanup = spi_ppc4xx_cleanup; - bbp->master->bits_per_word_mask = SPI_BPW_MASK(8); - bbp->master->use_gpio_descriptors = true; + bbp->ctlr->setup = spi_ppc4xx_setup; + bbp->ctlr->cleanup = spi_ppc4xx_cleanup; + bbp->ctlr->bits_per_word_mask = SPI_BPW_MASK(8); + bbp->ctlr->use_gpio_descriptors = true; /* * The SPI core will count the number of GPIO descriptors to figure * out the number of chip selects available on the platform. */ - bbp->master->num_chipselect = 0; + bbp->ctlr->num_chipselect = 0; /* the spi->mode bits understood by this driver: */ - bbp->master->mode_bits = + bbp->ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; /* Get the clock for the OPB */ diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index e748a5d04e97..9149d41fe65b 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work) struct ad5933_state, work.work); struct iio_dev *indio_dev = i2c_get_clientdata(st->client); __be16 buf[2]; - int val[2]; + u16 val[2]; unsigned char status; int ret; diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index f44e6412f4e3..d0db2efe0045 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -3723,12 +3723,10 @@ apply_min_padding: static int atomisp_set_crop(struct atomisp_device *isp, const struct v4l2_mbus_framefmt *format, + struct v4l2_subdev_state *sd_state, int which) { struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr]; - struct v4l2_subdev_state pad_state = { - .pads = &input->pad_cfg, - }; struct v4l2_subdev_selection sel = { .which = which, .target = V4L2_SEL_TGT_CROP, @@ -3754,7 +3752,7 @@ static int atomisp_set_crop(struct atomisp_device *isp, sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1; sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1; - ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel); + ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel); if (ret) dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n", sel.r.width, sel.r.height, 
sel.r.left, sel.r.top, ret); @@ -3770,9 +3768,6 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f, const struct atomisp_format_bridge *fmt, *snr_fmt; struct atomisp_sub_device *asd = &isp->asd; struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; - struct v4l2_subdev_state pad_state = { - .pads = &input->pad_cfg, - }; struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_TRY, }; @@ -3809,11 +3804,16 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f, dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n", format.format.width, format.format.height); - ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY); - if (ret) - return ret; + v4l2_subdev_lock_state(input->try_sd_state); + + ret = atomisp_set_crop(isp, &format.format, input->try_sd_state, + V4L2_SUBDEV_FORMAT_TRY); + if (ret == 0) + ret = v4l2_subdev_call(input->camera, pad, set_fmt, + input->try_sd_state, &format); + + v4l2_subdev_unlock_state(input->try_sd_state); - ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format); if (ret) return ret; @@ -4238,9 +4238,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p struct atomisp_device *isp = asd->isp; struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; const struct atomisp_format_bridge *format; - struct v4l2_subdev_state pad_state = { - .pads = &input->pad_cfg, - }; + struct v4l2_subdev_state *act_sd_state; struct v4l2_subdev_format vformat = { .which = V4L2_SUBDEV_FORMAT_TRY, }; @@ -4268,12 +4266,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p /* Disable dvs if resolution can't be supported by sensor */ if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY); - if (ret) - return ret; + v4l2_subdev_lock_state(input->try_sd_state); + + ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state, + V4L2_SUBDEV_FORMAT_TRY); + if (ret == 0) { + vformat.which = V4L2_SUBDEV_FORMAT_TRY; + ret = v4l2_subdev_call(input->camera, pad, set_fmt, + input->try_sd_state, &vformat); + } + + v4l2_subdev_unlock_state(input->try_sd_state); - vformat.which = V4L2_SUBDEV_FORMAT_TRY; - ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat); if (ret) return ret; @@ -4291,12 +4295,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p } } - ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE); - if (ret) - return ret; + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + + ret = atomisp_set_crop(isp, &vformat.format, act_sd_state, + V4L2_SUBDEV_FORMAT_ACTIVE); + if (ret == 0) { + vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE; + ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat); + } + + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); - vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE; - ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat); if (ret) return ret; diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h index f7b4bee9574b..d5b077e602ca 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h @@ -132,8 +132,8 @@ struct atomisp_input_subdev { /* Sensor rects for sensors which support crop */ struct v4l2_rect native_rect; struct v4l2_rect active_rect; 
- /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */ - struct v4l2_subdev_pad_config pad_cfg; + /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */ + struct v4l2_subdev_state *try_sd_state; struct v4l2_subdev *motor; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 01b7fa9b56a2..5b2d88c02d36 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -781,12 +781,20 @@ static int atomisp_enum_framesizes(struct file *file, void *priv, .which = V4L2_SUBDEV_FORMAT_ACTIVE, .code = input->code, }; + struct v4l2_subdev_state *act_sd_state; int ret; + if (!input->camera) + return -EINVAL; + if (input->crop_support) return atomisp_enum_framesizes_crop(isp, fsize); - ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse); + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, + act_sd_state, &fse); + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); if (ret) return ret; @@ -803,18 +811,25 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv, struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; struct v4l2_subdev_frame_interval_enum fie = { - .code = atomisp_in_fmt_conv[0].code, + .code = atomisp_in_fmt_conv[0].code, .index = fival->index, .width = fival->width, .height = fival->height, .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; + struct v4l2_subdev_state *act_sd_state; int ret; - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - pad, enum_frame_interval, NULL, - &fie); + if (!input->camera) + return -EINVAL; + + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval, + act_sd_state, &fie); + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); if (ret) return ret; @@ -830,30 +845,25 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; struct v4l2_subdev_mbus_code_enum code = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; const struct atomisp_format_bridge *format; - struct v4l2_subdev *camera; + struct v4l2_subdev_state *act_sd_state; unsigned int i, fi = 0; - int rval; + int ret; - camera = isp->inputs[asd->input_curr].camera; - if(!camera) { - dev_err(isp->dev, "%s(): camera is NULL, device is %s\n", - __func__, vdev->name); + if (!input->camera) return -EINVAL; - } - rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code); - if (rval == -ENOIOCTLCMD) { - dev_warn(isp->dev, - "enum_mbus_code pad op not supported by %s. 
Please fix your sensor driver!\n", - camera->name); - } - - if (rval) - return rval; + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code, + act_sd_state, &code); + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); + if (ret) + return ret; for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) { format = &atomisp_output_fmts[i]; diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index c1c8501ec61f..547e1444ad97 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -862,6 +862,9 @@ static void atomisp_unregister_entities(struct atomisp_device *isp) v4l2_device_unregister(&isp->v4l2_dev); media_device_unregister(&isp->media_dev); media_device_cleanup(&isp->media_dev); + + for (i = 0; i < isp->input_cnt; i++) + __v4l2_subdev_state_free(isp->inputs[i].try_sd_state); } static int atomisp_register_entities(struct atomisp_device *isp) @@ -933,32 +936,49 @@ v4l2_device_failed: static void atomisp_init_sensor(struct atomisp_input_subdev *input) { + static struct lock_class_key try_sd_state_key; struct v4l2_subdev_mbus_code_enum mbus_code_enum = { }; struct v4l2_subdev_frame_size_enum fse = { }; - struct v4l2_subdev_state sd_state = { - .pads = &input->pad_cfg, - }; struct v4l2_subdev_selection sel = { }; + struct v4l2_subdev_state *try_sd_state, *act_sd_state; int i, err; + /* + * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc() + * but atomisp needs this for try_fmt on its /dev/video# node since + * it emulates a normal v4l2 device there, passing through try_fmt / + * set_fmt to the sensor. + */ + try_sd_state = __v4l2_subdev_state_alloc(input->camera, + "atomisp:try_sd_state->lock", &try_sd_state_key); + if (IS_ERR(try_sd_state)) + return; + + input->try_sd_state = try_sd_state; + + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE; - err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum); + err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, + act_sd_state, &mbus_code_enum); if (!err) input->code = mbus_code_enum.code; sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; sel.target = V4L2_SEL_TGT_NATIVE_SIZE; - err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel); + err = v4l2_subdev_call(input->camera, pad, get_selection, + act_sd_state, &sel); if (err) - return; + goto unlock_act_sd_state; input->native_rect = sel.r; sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; sel.target = V4L2_SEL_TGT_CROP_DEFAULT; - err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel); + err = v4l2_subdev_call(input->camera, pad, get_selection, + act_sd_state, &sel); if (err) - return; + goto unlock_act_sd_state; input->active_rect = sel.r; @@ -973,7 +993,8 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input) fse.code = input->code; fse.which = V4L2_SUBDEV_FORMAT_ACTIVE; - err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse); + err = v4l2_subdev_call(input->camera, pad, enum_frame_size, + act_sd_state, &fse); if (err) break; @@ -989,22 +1010,26 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input) * for padding, set the crop rect to cover the entire sensor instead * of only the default active area. 
* - * Do this for both try and active formats since the try_crop rect in - * pad_cfg may influence (clamp) future try_fmt calls with which == try. + * Do this for both try and active formats since the crop rect in + * try_sd_state may influence (clamp size) in calls with which == try. */ sel.which = V4L2_SUBDEV_FORMAT_TRY; sel.target = V4L2_SEL_TGT_CROP; sel.r = input->native_rect; - err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel); + v4l2_subdev_lock_state(input->try_sd_state); + err = v4l2_subdev_call(input->camera, pad, set_selection, + input->try_sd_state, &sel); + v4l2_subdev_unlock_state(input->try_sd_state); if (err) - return; + goto unlock_act_sd_state; sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; sel.target = V4L2_SEL_TGT_CROP; sel.r = input->native_rect; - err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel); + err = v4l2_subdev_call(input->camera, pad, set_selection, + act_sd_state, &sel); if (err) - return; + goto unlock_act_sd_state; dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n", input->native_rect.width, input->native_rect.height, @@ -1012,6 +1037,10 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input) input->binning_support); input->crop_support = true; + +unlock_act_sd_state: + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); } int atomisp_register_device_nodes(struct atomisp_device *isp) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index a5f58988130a..c1fbcdd16182 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item, return count; } +static int target_try_configure_unmap(struct se_device *dev, + const char *config_opt) +{ + if (!dev->transport->configure_unmap) { + pr_err("Generic Block Discard not supported\n"); + return -ENOSYS; + } + + if (!target_dev_configured(dev)) { + pr_err("Generic Block Discard setup for %s requires device to be configured\n", + config_opt); + return -ENODEV; + } + + if (!dev->transport->configure_unmap(dev)) { + pr_err("Generic Block Discard setup for %s failed\n", + config_opt); + return -ENOSYS; + } + + return 0; +} + static ssize_t emulate_tpu_store(struct config_item *item, const char *page, size_t count) { @@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item, * Discard supported is detected iblock_create_virtdevice(). */ if (flag && !da->max_unmap_block_desc_count) { - if (!dev->transport->configure_unmap || - !dev->transport->configure_unmap(dev)) { - pr_err("Generic Block Discard not supported\n"); - return -ENOSYS; - } + ret = target_try_configure_unmap(dev, "emulate_tpu"); + if (ret) + return ret; } da->emulate_tpu = flag; @@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item, * Discard supported is detected iblock_create_virtdevice(). */ if (flag && !da->max_unmap_block_desc_count) { - if (!dev->transport->configure_unmap || - !dev->transport->configure_unmap(dev)) { - pr_err("Generic Block Discard not supported\n"); - return -ENOSYS; - } + ret = target_try_configure_unmap(dev, "emulate_tpws"); + if (ret) + return ret; } da->emulate_tpws = flag; @@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item, * Discard supported is detected iblock_configure_device(). 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f58988130a..c1fbcdd16182 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item,
 	return count;
 }
 
+static int target_try_configure_unmap(struct se_device *dev,
+				      const char *config_opt)
+{
+	if (!dev->transport->configure_unmap) {
+		pr_err("Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	if (!target_dev_configured(dev)) {
+		pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+		       config_opt);
+		return -ENODEV;
+	}
+
+	if (!dev->transport->configure_unmap(dev)) {
+		pr_err("Generic Block Discard setup for %s failed\n",
+		       config_opt);
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
 static ssize_t emulate_tpu_store(struct config_item *item,
 		const char *page, size_t count)
 {
@@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item,
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
 	if (flag && !da->max_unmap_block_desc_count) {
-		if (!dev->transport->configure_unmap ||
-		    !dev->transport->configure_unmap(dev)) {
-			pr_err("Generic Block Discard not supported\n");
-			return -ENOSYS;
-		}
+		ret = target_try_configure_unmap(dev, "emulate_tpu");
+		if (ret)
+			return ret;
 	}
 
 	da->emulate_tpu = flag;
@@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item,
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
 	if (flag && !da->max_unmap_block_desc_count) {
-		if (!dev->transport->configure_unmap ||
-		    !dev->transport->configure_unmap(dev)) {
-			pr_err("Generic Block Discard not supported\n");
-			return -ENOSYS;
-		}
+		ret = target_try_configure_unmap(dev, "emulate_tpws");
+		if (ret)
+			return ret;
 	}
 
 	da->emulate_tpws = flag;
@@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
 	 * Discard supported is detected iblock_configure_device().
 	 */
 	if (flag && !da->max_unmap_block_desc_count) {
-		if (!dev->transport->configure_unmap ||
-		    !dev->transport->configure_unmap(dev)) {
-			pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
-			       da->da_dev);
-			return -ENOSYS;
-		}
+		ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+		if (ret)
+			return ret;
 	}
 	da->unmap_zeroes_data = flag;
 	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
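The target_core_configfs.c hunks above fold three copies of the discard setup check into target_try_configure_unmap(). The sketch below is illustrative only (placeholder function name and a fixed attribute): it shows the store-side pattern that results, where -ENOSYS signals a missing or failing configure_unmap() backend op and -ENODEV a device that has not been configured yet:

/*
 * Illustrative sketch only: the call pattern each of the three stores
 * above now follows before flipping its discard-related attribute.
 */
static ssize_t example_discard_flag_store(struct se_device *dev,
					  struct se_dev_attrib *da,
					  bool flag, size_t count)
{
	int ret;

	if (flag && !da->max_unmap_block_desc_count) {
		ret = target_try_configure_unmap(dev, "example_flag");
		if (ret)	/* -ENOSYS or -ENODEV from the helper */
			return ret;
	}

	da->emulate_tpu = flag;	/* or emulate_tpws / unmap_zeroes_data */
	return count;
}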
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 87e4795275fe..6f798f6a2b84 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -203,7 +203,7 @@ struct tb_regs_switch_header {
 #define ROUTER_CS_5_WOP			BIT(1)
 #define ROUTER_CS_5_WOU			BIT(2)
 #define ROUTER_CS_5_WOD			BIT(3)
-#define ROUTER_CS_5_C3S			BIT(23)
+#define ROUTER_CS_5_CNS			BIT(23)
 #define ROUTER_CS_5_PTO			BIT(24)
 #define ROUTER_CS_5_UTO			BIT(25)
 #define ROUTER_CS_5_HCO			BIT(26)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index f8f0d24ff6e4..1515eff8cc3e 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -290,7 +290,7 @@ int usb4_switch_setup(struct tb_switch *sw)
 	}
 
 	/* TBT3 supported by the CM */
-	val |= ROUTER_CS_5_C3S;
+	val &= ~ROUTER_CS_5_CNS;
 
 	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
 }
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index cd258922bd78..2dda737b1660 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -302,7 +302,7 @@ static void pci1xxxx_process_read_data(struct uart_port *port,
 	 * to read, the data is received one byte at a time.
 	 */
 	while (valid_burst_count--) {
-		if (*buff_index >= (RX_BUF_SIZE - UART_BURST_SIZE))
+		if (*buff_index > (RX_BUF_SIZE - UART_BURST_SIZE))
 			break;
 		burst_buf = (u32 *)&rx_buff[*buff_index];
 		*burst_buf = readl(port->membase + UART_RX_BURST_FIFO);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 3ec725555bcc..4749331fe618 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -605,13 +605,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
 		return;
 	}
 
-	pending = uart_port_tx(&s->port, ch,
+	pending = uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP,
 		!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF),
 		mxs_write(ch, s, REG_DATA));
 	if (pending)
 		mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
 	else
 		mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);
+
+	if (uart_tx_stopped(&s->port))
+		mxs_auart_stop_tx(&s->port);
 }
 
 static void mxs_auart_rx_char(struct mxs_auart_port *s)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 029d017fc1b6..d77b25b79ae3 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3057,7 +3057,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
  */
 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
 {
-	u32 mask = 1U << task_tag;
+	u32 mask;
 	unsigned long flags;
 	int err;
 
@@ -3075,6 +3075,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
 		return 0;
 	}
 
+	mask = 1U << task_tag;
+
 	/* clear outstanding transaction before retry */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_utrl_clear(hba, mask);
@@ -6352,7 +6354,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_hold(hba);
 		if (!ufshcd_is_clkgating_allowed(hba))
 			ufshcd_setup_clocks(hba, true);
-		ufshcd_release(hba);
 		pm_op = hba->is_sys_suspended ?
 				UFS_SYSTEM_PM : UFS_RUNTIME_PM;
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index aeb2c1f4c7c7..25dc0599345e 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -376,7 +376,6 @@
 /* Global HWPARAMS4 Register */
 #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n)	(((n) & (0x0f << 13)) >> 13)
 #define DWC3_MAX_HIBER_SCRATCHBUFS		15
-#define DWC3_EXT_BUFF_CONTROL	BIT(21)
 
 /* Global HWPARAMS6 Register */
 #define DWC3_GHWPARAMS6_BCSUPPORT		BIT(14)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3a8c4c28573e..da0de9cc9fa3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -652,12 +652,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
 		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
 	}
 
-	if (dep->endpoint.fifo_mode) {
-		if (!(dwc->hwparams.hwparams4 & DWC3_EXT_BUFF_CONTROL))
-			return -EINVAL;
-		params.param1 |= DWC3_DEPCFG_EBC_HWO_NOWB | DWC3_DEPCFG_USE_EBC;
-	}
-
 	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
 }
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 6d91a3f22e38..d73e735e4081 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -26,8 +26,6 @@ struct dwc3;
 #define DWC3_DEPCFG_XFER_NOT_READY_EN	BIT(10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN	BIT(11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN	BIT(13)
-#define DWC3_DEPCFG_EBC_HWO_NOWB	BIT(14)
-#define DWC3_DEPCFG_USE_EBC		BIT(15)
 #define DWC3_DEPCFG_BINTERVAL_M1(n)	(((n) & 0xff) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE	BIT(24)
 #define DWC3_DEPCFG_EP_NUMBER(n)	(((n) & 0x1f) << 25)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b8cfea7812d6..3b9f080109d7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -923,8 +923,8 @@ static void shutdown_pirq(struct irq_data *data)
 		return;
 
 	do_mask(info, EVT_MASK_REASON_EXPLICIT);
-	xen_evtchn_close(evtchn);
 	xen_irq_info_cleanup(info);
+	xen_evtchn_close(evtchn);
 }
 
 static void enable_pirq(struct irq_data *data)
@@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
 {
 	evtchn_port_t evtchn;
+	bool close_evtchn = false;
 
 	if (!info) {
 		xen_irq_free_desc(irq);
@@ -975,7 +976,7 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
 		struct xenbus_device *dev;
 
 		if (!info->is_static)
-			xen_evtchn_close(evtchn);
+			close_evtchn = true;
 
 		switch (info->type) {
 		case IRQT_VIRQ:
@@ -995,6 +996,9 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
 		}
 
 		xen_irq_info_cleanup(info);
+
+		if (close_evtchn)
+			xen_evtchn_close(evtchn);
 	}
 
 	xen_free_irq(info);
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 26ffb8755ffb..f93f73ecefee 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -317,7 +317,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
 		rc = -EFAULT;
 		goto out_free;
 	}
-	if (copy_to_user(arg->gref_ids, gref_ids,
+	if (copy_to_user(arg->gref_ids_flex, gref_ids,
 			 sizeof(gref_ids[0]) * op.count)) {
 		rc = -EFAULT;
 		goto out_free;
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 508655273145..c63f317e3df3 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -65,7 +65,7 @@ struct pcpu {
 	uint32_t flags;
 };
 
-static struct bus_type xen_pcpu_subsys = {
+static const struct bus_type xen_pcpu_subsys = {
 	.name = "xen_cpu",
 	.dev_name = "xen_cpu",
 };
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 35b6e306026a..67dfa4778864 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1223,18 +1223,13 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
 	kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
 	mmap_write_unlock(mm);
 
-	size = sizeof(*ports) * kioreq->vcpus;
-	ports = kzalloc(size, GFP_KERNEL);
-	if (!ports) {
-		ret = -ENOMEM;
+	ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
+				  kioreq->vcpus, sizeof(*ports));
+	if (IS_ERR(ports)) {
+		ret = PTR_ERR(ports);
 		goto error_kfree;
 	}
 
-	if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
-		ret = -EFAULT;
-		goto error_kfree_ports;
-	}
-
 	for (i = 0; i < kioreq->vcpus; i++) {
 		kioreq->ports[i].vcpu = i;
 		kioreq->ports[i].port = ports[i];
@@ -1256,7 +1251,7 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
 error_unbind:
 	while (--i >= 0)
 		unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
-error_kfree_ports:
+
 	kfree(ports);
 error_kfree:
 	kfree(kioreq);
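The privcmd.c hunk above replaces an open-coded kzalloc() plus copy_from_user() with memdup_array_user(), which performs the overflow-checked size calculation, the allocation, and the copy in one call and returns an ERR_PTR() on failure. A self-contained sketch of that pattern follows; the helper and its names are hypothetical, not from this commit:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: duplicate an array of u64 ports handed in from
 * userspace.  memdup_array_user() allocates and copies in one step, so
 * the caller only needs an IS_ERR()/PTR_ERR() check and a kfree(). */
static int handle_user_ports(const u64 __user *uports, unsigned int nr)
{
	u64 *ports;

	ports = memdup_array_user(uports, nr, sizeof(*ports));
	if (IS_ERR(ports))
		return PTR_ERR(ports);

	/* ... use ports[0..nr-1] here ... */

	kfree(ports);
	return 0;
}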
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 8cd583db20b1..b293d7652f15 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -237,7 +237,7 @@ static const struct attribute_group *balloon_groups[] = {
 	NULL
 };
 
-static struct bus_type balloon_subsys = {
+static const struct bus_type balloon_subsys = {
 	.name = BALLOON_CLASS_NAME,
 	.dev_name = BALLOON_CLASS_NAME,
 };
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 32835b4b9bc5..51b3124b0d56 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -116,14 +116,15 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
  * @dev: xenbus device
  * @path: path to watch
  * @watch: watch to register
+ * @will_handle: events queuing determine callback
  * @callback: callback to register
  *
  * Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback.  On success,
- * the given @path will be saved as @watch->node, and remains the
- * caller's to free.  On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
+ * for storage, @will_handle function as the callback to determine if each
+ * event need to be queued, and the given @callback function as the callback.
+ * On success, the given @path will be saved as @watch->node, and remains the
+ * caller's to free.  On error, @watch->node will be NULL, the device will
+ * switch to %XenbusStateClosing, and the error will be saved in the store.
  *
  * Returns: %0 on success or -errno on error
  */
@@ -158,11 +159,13 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
  * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
  * @dev: xenbus device
  * @watch: watch to register
+ * @will_handle: events queuing determine callback
  * @callback: callback to register
  * @pathfmt: format of path to watch
  *
  * Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the
+ * structure for storage, @will_handle function as the callback to determine if
+ * each event need to be queued, and the given @callback function as the
  * callback.  On success, the watched path (@path/@path2) will be saved
 * as @watch->node, and becomes the caller's to kfree().
 * On error, watch->node will be NULL, so the caller has nothing to