Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/adp/adp_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h | 70
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 114
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 55
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/bridge/tda998x_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gpusvm.c | 4
-rw-r--r--  drivers/gpu/drm/drm_panic.c | 5
-rw-r--r--  drivers/gpu/drm/drm_panic_qr.rs | 13
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r--  drivers/gpu/drm/gud/gud_pipe.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_iosf_mbi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.c | 2
-rw-r--r--  drivers/gpu/drm/lima/lima_drv.c | 8
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 2
-rw-r--r--  drivers/gpu/drm/xe/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_eu_stall.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_clock.c | 54
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 33
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_survivability_mode.c | 31
-rw-r--r--  drivers/gpu/drm/xe/xe_survivability_mode.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules | 2
-rw-r--r--  drivers/gpu/nova-core/driver.rs | 4
-rw-r--r--  drivers/gpu/nova-core/regs.rs | 2
96 files changed, 632 insertions(+), 803 deletions(-)
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
index 0eeb9e5fab26..c98c647f981d 100644
--- a/drivers/gpu/drm/adp/adp_drv.c
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -232,9 +232,9 @@ static struct drm_plane *adp_plane_new(struct adp_drv_private *adp)
ALL_CRTCS, &adp_plane_funcs,
plane_formats, ARRAY_SIZE(plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, "plane");
- if (!plane) {
+ if (IS_ERR(plane)) {
drm_err(drm, "failed to allocate plane");
- return ERR_PTR(-ENOMEM);
+ return plane;
}
drm_plane_helper_add(plane, &adp_plane_helper_funcs);
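[Editor's note: for context on the adp fix above, the drm-managed plane allocator used here reports failure via ERR_PTR(), never NULL, so the old "!plane" test could not trigger and the encoded errno was being replaced with -ENOMEM. A minimal sketch of the ERR_PTR idiom; foo/foo_alloc are hypothetical names, not part of the patch:]

	#include <linux/err.h>

	static struct foo *foo_create(struct device *dev)
	{
		struct foo *foo = foo_alloc(dev);	/* returns ERR_PTR() on failure */

		if (IS_ERR(foo)) {			/* a NULL check can never fire here */
			dev_err(dev, "allocation failed: %pe\n", foo);
			return foo;			/* propagate the encoded errno as-is */
		}
		return foo;
	}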
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index dc47f5fd4ea1..b4ad163f42a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -195,6 +195,10 @@ static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
+ /* Parse all deferred errors with UMC aca handle */
+ if (ACA_BANK_ERR_IS_DEFFERED(bank))
+ return handle->hwip == ACA_HWIP_TYPE_UMC;
+
if (!aca_bank_hwip_is_matched(bank, handle->hwip))
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 6b180f1b33fd..38c88897e1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -80,14 +80,6 @@ struct ras_query_context;
(ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS]))
-#define ACA_BANK_ERR_CE_DE_DECODE(bank) \
- (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \
- ACA_ERROR_TYPE_CE)
-
-#define ACA_BANK_ERR_UE_DE_DECODE(bank) \
- (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \
- ACA_ERROR_TYPE_UE)
-
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 75fcc521c171..00e96419fcda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -447,6 +447,13 @@ success:
return true;
}
+static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
+{
+ struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
+
+ return (res->flags & IORESOURCE_ROM_SHADOW);
+}
+
static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev)) {
@@ -465,14 +472,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
goto success;
}
- if (amdgpu_read_platform_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from platform\n");
- goto success;
- }
+ if (amdgpu_prefer_rom_resource(adev)) {
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
- if (amdgpu_read_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
- goto success;
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ } else {
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
}
if (amdgpu_read_bios_from_rom(adev)) {
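[Editor's note: the reordering above keys off IORESOURCE_ROM_SHADOW. When the platform has shadowed the ROM (typical for the primary display device), the ROM BAR copy is tried before the platform method; otherwise the order is reversed. A condensed sketch of the resulting strategy, with logging omitted; fetch_vbios_ordered is a hypothetical wrapper around the real helpers in the diff:]

	static bool fetch_vbios_ordered(struct amdgpu_device *adev)
	{
		bool shadowed = adev->pdev->resource[PCI_ROM_RESOURCE].flags &
				IORESOURCE_ROM_SHADOW;

		if (shadowed)	/* shadowed ROM: prefer the ROM BAR copy */
			return amdgpu_read_bios(adev) ||
			       amdgpu_read_platform_bios(adev);

		/* no shadow: prefer the platform (ACPI/EFI) copy */
		return amdgpu_read_platform_bios(adev) ||
		       amdgpu_read_bios(adev);
	}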
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ebf6179064b..a30111d2c3ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
}
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t ppos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -282,8 +282,8 @@ static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
return bytes_read;
}
-BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
- AMDGPU_SYS_REG_STATE_END);
+static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+ AMDGPU_SYS_REG_STATE_END);
int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 23cfce5aa1fc..26bf896f1444 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1809,7 +1809,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
};
static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1882,8 +1881,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
@@ -1966,7 +1963,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
-#endif
/* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -2313,14 +2309,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP;
}
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
+ if (!amdgpu_si_support) {
dev_info(&pdev->dev,
"SI support provided by radeon.\n");
dev_info(&pdev->dev,
@@ -2328,16 +2324,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
);
return -ENODEV;
}
- }
+ break;
+#else
+ dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
+ return -ENODEV;
#endif
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
+ if (!amdgpu_cik_support) {
dev_info(&pdev->dev,
"CIK support provided by radeon.\n");
dev_info(&pdev->dev,
@@ -2345,8 +2343,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
);
return -ENODEV;
}
- }
+ break;
+#else
+ dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
+ return -ENODEV;
#endif
+ default:
+ break;
+ }
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2f24a6aa13bf..5f5c00ace96b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -280,7 +280,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
- if (del_timer(&ring->fence_drv.fallback_timer) &&
+ if (timer_delete(&ring->fence_drv.fallback_timer) &&
seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
@@ -618,7 +618,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- del_timer_sync(&ring->fence_drv.fallback_timer);
+ timer_delete_sync(&ring->fence_drv.fallback_timer);
}
}
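[Editor's note: the del_timer()/del_timer_sync() to timer_delete()/timer_delete_sync() changes here, and in amdgpu_mes.c and amdgpu_ring_mux.c below, are a mechanical rename with unchanged semantics. Sketch of the mapping:]

	#include <linux/timer.h>

	/* Old name           -> new name (same behavior):
	 * del_timer(&t)      -> timer_delete(&t)       may return while the
	 *                                              handler is still running
	 * del_timer_sync(&t) -> timer_delete_sync(&t)  waits for a running
	 *                                              handler to finish
	 */
	static void stop_fallback_timer(struct timer_list *t)
	{
		timer_delete_sync(t);	/* safe even if the timer was never armed */
	}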
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 85f774063f9b..fb212f0a1136 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1239,7 +1239,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- del_timer_sync(&ring->fence_drv.fallback_timer);
+ timer_delete_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d54bb1377262..df5d5dbd7f0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -4025,7 +4025,7 @@ int is_psp_fw_valid(struct psp_bin_desc bin)
}
static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4061,7 +4061,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
}
static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4113,11 +4113,11 @@ rel_buf:
* Writing to this file will stage an IFWI for update. Reading from this file
* will trigger the update process.
*/
-static struct bin_attribute psp_vbflash_bin_attr = {
+static const struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
- .write = amdgpu_psp_vbflash_write,
- .read = amdgpu_psp_vbflash_read,
+ .write_new = amdgpu_psp_vbflash_write,
+ .read_new = amdgpu_psp_vbflash_read,
};
/**
@@ -4144,7 +4144,7 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
-static struct bin_attribute *bin_flash_attrs[] = {
+static const struct bin_attribute *const bin_flash_attrs[] = {
&psp_vbflash_bin_attr,
NULL
};
@@ -4180,7 +4180,7 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
const struct attribute_group amdgpu_flash_attr_group = {
.attrs = flash_attrs,
- .bin_attrs = bin_flash_attrs,
+ .bin_attrs_new = bin_flash_attrs,
.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
.is_visible = amdgpu_flash_attr_is_visible,
};
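[Editor's note: the psp changes above follow the sysfs bin_attribute constification: .read_new/.write_new take a const struct bin_attribute *, and groups publish const attributes via .bin_attrs_new. A minimal sketch of the pattern; "example" names are hypothetical:]

	#include <linux/sysfs.h>

	static ssize_t example_read(struct file *f, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
	{
		return 0;	/* no payload in this sketch */
	}

	/* The attribute itself can now live in rodata. */
	static const struct bin_attribute example_attr = {
		.attr = { .name = "example", .mode = 0444 },
		.read_new = example_read,
	};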
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index bed2603ae4c4..443409d4f4b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -77,6 +77,7 @@ const char *ras_block_string[] = {
"jpeg",
"ih",
"mpio",
+ "mmsch",
};
const char *ras_mca_block_string[] = {
@@ -1733,7 +1734,7 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t ppos, size_t count)
{
struct amdgpu_ras *con =
@@ -2068,8 +2069,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
/* debugfs end */
/* ras fs */
-static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
- amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
@@ -2091,7 +2092,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
&con->event_state_attr.attr,
NULL
};
- struct bin_attribute *bin_attrs[] = {
+ const struct bin_attribute *bin_attrs[] = {
NULL,
NULL,
};
@@ -2117,11 +2118,10 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
if (amdgpu_bad_page_threshold != 0) {
/* add bad_page_features entry */
- bin_attr_gpu_vram_bad_pages.private = NULL;
con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ sysfs_bin_attr_init(&con->badpages_attr);
bin_attrs[0] = &con->badpages_attr;
- group.bin_attrs = bin_attrs;
- sysfs_bin_attr_init(bin_attrs[0]);
+ group.bin_attrs_new = bin_attrs;
}
r = sysfs_create_group(&adev->dev->kobj, &group);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 764e9fa0a914..927d6bff734a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -98,6 +98,7 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__JPEG,
AMDGPU_RAS_BLOCK__IH,
AMDGPU_RAS_BLOCK__MPIO,
+ AMDGPU_RAS_BLOCK__MMSCH,
AMDGPU_RAS_BLOCK__LAST,
AMDGPU_RAS_BLOCK__ANY = -1
@@ -795,6 +796,12 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
return TA_RAS_BLOCK__VCN;
case AMDGPU_RAS_BLOCK__JPEG:
return TA_RAS_BLOCK__JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return TA_RAS_BLOCK__IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return TA_RAS_BLOCK__MPIO;
+ case AMDGPU_RAS_BLOCK__MMSCH:
+ return TA_RAS_BLOCK__MMSCH;
default:
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
return TA_RAS_BLOCK__UMC;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d55c8b7fdb59..59acdbfe28d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -608,59 +608,17 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
- volatile u32 *mqd;
- u32 *kbuf;
- int r, i;
- uint32_t value, result;
+ ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
+ void *from = ((u8 *)ring->mqd_ptr) + *pos;
- if (*pos & 3 || size & 3)
- return -EINVAL;
-
- kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto err_free;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r)
- goto err_unreserve;
-
- /*
- * Copy to local buffer to avoid put_user(), which might fault
- * and acquire mmap_sem, under reservation_ww_class_mutex.
- */
- for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
- kbuf[i] = mqd[i];
+ if (*pos > ring->mqd_size)
+ return 0;
- amdgpu_bo_kunmap(ring->mqd_obj);
- amdgpu_bo_unreserve(ring->mqd_obj);
+ if (copy_to_user(buf, from, bytes))
+ return -EFAULT;
- result = 0;
- while (size) {
- if (*pos >= ring->mqd_size)
- break;
-
- value = kbuf[*pos/4];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- goto err_free;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
- }
-
- kfree(kbuf);
- return result;
-
-err_unreserve:
- amdgpu_bo_unreserve(ring->mqd_obj);
-err_free:
- kfree(kbuf);
- return r;
+ *pos += bytes;
+ return bytes;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
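[Editor's note: the rewritten reader above can copy straight out of ring->mqd_ptr, apparently relying on the MQD staying kernel-mapped for the ring's lifetime, so the per-read reserve/kmap dance is gone. Under that same assumption, the bounds-clamp-copy-advance logic can also be expressed with the stock helper; a sketch:]

	#include <linux/fs.h>

	static ssize_t mqd_read_sketch(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
	{
		struct amdgpu_ring *ring = file_inode(f)->i_private;

		/* Clamps the copy to [*pos, mqd_size), returns 0 at EOF,
		 * and advances *pos on success.
		 */
		return simple_read_from_buffer(buf, size, pos,
					       ring->mqd_ptr, ring->mqd_size);
	}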
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 1c66da1c3fb4..03ed14663107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -124,7 +124,7 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
}
}
- del_timer(&mux->resubmit_timer);
+ timer_delete(&mux->resubmit_timer);
mux->s_resubmit = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 6029a799074d..477424472bbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1172,7 +1172,7 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban
break;
case ACA_SMU_TYPE_CE:
count = ext_error_code == 6 ? count : 0ULL;
- bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 3c07517be09a..ae071985f26e 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -473,7 +473,8 @@ static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
break;
case AMDGPU_DPX_PARTITION_MODE:
num_xcp = 2;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
num_xcp = 3;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 6d514efb0a6d..a63ce747863f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6851,22 +6851,9 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
- struct amdgpu_ring *ring;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kgq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
if (r)
return r;
}
@@ -7173,55 +7160,24 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v10_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v10_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
gfx_v10_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kcq_init_queue(&adev->gfx.compute_ring[i],
+ false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
@@ -9579,20 +9535,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
if (r)
return r;
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kgq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kgq_init_queue(ring, true);
if (r) {
- DRM_ERROR("fail to unresv mqd_obj\n");
+ DRM_ERROR("fail to init kgq\n");
return r;
}
@@ -9649,20 +9594,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kcq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d8772cd6db63..d57db42f9536 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1581,7 +1581,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 2;
+ adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
@@ -4115,22 +4115,9 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
- struct amdgpu_ring *ring;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kgq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
if (r)
return r;
}
@@ -4452,57 +4439,24 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v11_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
+ gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
if (!amdgpu_async_gfx_ring)
gfx_v11_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
@@ -6667,20 +6621,9 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
if (r)
return r;
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kgq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "failed to init kgq\n");
return r;
}
@@ -6707,20 +6650,9 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kcq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
r = amdgpu_mes_map_legacy_queue(adev, ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index dceb5ad38862..e7b58e470292 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1355,7 +1355,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 2;
+ adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 2;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
@@ -3001,37 +3001,19 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
- int r, i;
- struct amdgpu_ring *ring;
+ int i, r;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kgq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
if (r)
- goto done;
+ return r;
}
r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
- goto done;
-
- r = gfx_v12_0_cp_gfx_start(adev);
- if (r)
- goto done;
+ return r;
-done:
- return r;
+ return gfx_v12_0_cp_gfx_start(adev);
}
static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
@@ -3344,57 +3326,25 @@ static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v12_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
+ gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
+ adev->gfx.kiq[0].ring.sched.ready = true;
return 0;
}
static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
if (!amdgpu_async_gfx_ring)
gfx_v12_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
@@ -5224,20 +5174,9 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kgq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kgq_init_queue(ring, true);
if (r) {
- DRM_ERROR("fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "failed to init kgq\n");
return r;
}
@@ -5264,20 +5203,9 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kcq_init_queue(ring, true);
if (r) {
- DRM_ERROR("fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "failed to init kcq\n");
return r;
}
r = amdgpu_mes_map_legacy_queue(adev, ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d116a2e2f469..bfedd487efc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4683,60 +4683,25 @@ static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v8_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
gfx_v8_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
- if (!r) {
- r = gfx_v8_0_kcq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v8_0_kcq_init_queue(&adev->gfx.compute_ring[i]);
if (r)
- goto done;
+ return r;
}
gfx_v8_0_set_mec_doorbell_range(adev);
- r = gfx_v8_0_kiq_kcq_enable(adev);
- if (r)
- goto done;
-
-done:
- return r;
+ return gfx_v8_0_kiq_kcq_enable(adev);
}
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d345285ea885..d7db4cb907ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1269,6 +1269,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = false;
if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
@@ -3890,55 +3891,23 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v9_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
gfx_v9_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
@@ -7319,20 +7288,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)){
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v9_0_kcq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
spin_lock_irqsave(&kiq->ring_lock, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 736398b0d16d..53fbf6ca7cdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -867,13 +867,12 @@ static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
switch (type) {
case ACA_SMU_TYPE_UE:
- bank->aca_err_type = ACA_BANK_ERR_UE_DE_DECODE(bank);
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
- ret = aca_error_cache_log_bank_error(handle, &info,
- bank->aca_err_type,
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
@@ -2168,55 +2167,27 @@ static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_
static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[xcc_id].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
return 0;
}
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ struct amdgpu_ring *ring;
+ int i, r;
gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ ring = &adev->gfx.compute_ring[i + xcc_id *
+ adev->gfx.num_compute_rings];
+
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, xcc_id);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, xcc_id);
}
static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
@@ -3588,20 +3559,9 @@ pipe_reset:
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)){
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
spin_lock_irqsave(&kiq->ring_lock, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 5598a35f72af..a8ccae361ec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -1328,7 +1328,7 @@ static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_ban
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index a54e7b929295..84cde1239ee4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -751,7 +751,7 @@ static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index dc94d58d33a6..688a720bbbbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -31,7 +31,6 @@
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
-#include "gc/gc_9_0_sh_mask.h"
#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"
@@ -1291,71 +1290,21 @@ static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
seq, 0xffffffff, 4);
}
-/*
- * sdma_v4_4_2_get_invalidate_req - Construct the VM_INVALIDATE_ENG0_REQ register value
- * @vmid: The VMID to invalidate
- * @flush_type: The type of flush (0 = legacy, 1 = lightweight, 2 = heavyweight)
- *
- * This function constructs the VM_INVALIDATE_ENG0_REQ register value for the specified VMID
- * and flush type. It ensures that all relevant page table cache levels (L1 PTEs, L2 PTEs, and
- * L2 PDEs) are invalidated.
- */
-static uint32_t sdma_v4_4_2_get_invalidate_req(unsigned int vmid,
- uint32_t flush_type)
-{
- u32 req = 0;
-
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vmid);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
- return req;
-}
-
-/*
- * sdma_v4_4_2_ring_emit_vm_flush - Emit VM flush commands for SDMA
- * @ring: The SDMA ring
- * @vmid: The VMID to flush
- * @pd_addr: The page directory address
+/**
+ * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
*
- * This function emits the necessary register writes and waits to perform a VM flush for the
- * specified VMID. It updates the PTB address registers and issues a VM invalidation request
- * using the specified VM invalidation engine.
+ * @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA.
*/
static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vmid, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_device *adev = ring->adev;
- uint32_t req = sdma_v4_4_2_get_invalidate_req(vmid, 0);
- unsigned int eng = ring->vm_inv_eng;
- struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
-
- amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
- (hub->ctx_addr_distance * vmid),
- lower_32_bits(pd_addr));
-
- amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
- (hub->ctx_addr_distance * vmid),
- upper_32_bits(pd_addr));
- /*
- * Construct and emit the VM invalidation packet
- */
- amdgpu_ring_write(ring,
- SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_VM_INVALIDATE) |
- SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATE) |
- SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(0x1f) |
- SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(0x1f) |
- SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(eng));
- amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(req));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(BIT(vmid)));
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
@@ -2177,7 +2126,8 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
3 + /* hdp invalidate */
6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
/* sdma_v4_4_2_ring_emit_vm_flush */
- 4 + 2 * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
.emit_ib = sdma_v4_4_2_ring_emit_ib,
@@ -2209,7 +2159,8 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
3 + /* hdp invalidate */
6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
/* sdma_v4_4_2_ring_emit_vm_flush */
- 4 + 2 * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
.emit_ib = sdma_v4_4_2_ring_emit_ib,
@@ -2595,7 +2546,7 @@ static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_ban
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
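[Editor's note: with the hand-rolled VM_INVALIDATION packet replaced by the generic amdgpu_gmc_emit_flush_gpu_tlb() call above, the ring's emit_frame_size budget switches from a fixed packet count to the SOC15 register-access budget. Rough accounting, inferred from the multipliers in the diff:]

	/* Per vm flush, new path:
	 *   SOC15_FLUSH_GPU_TLB_NUM_WREG     register writes * 3 dwords each
	 *   SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT register waits  * 6 dwords each
	 * versus the removed path: a 4-dword VM_INVALIDATION packet plus
	 * 2 PTB address writes of 3 dwords each, i.e. the old "4 + 2 * 3".
	 */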
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index a3b5fda22432..8a3f326474e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -92,6 +92,9 @@ enum ta_ras_block {
TA_RAS_BLOCK__MCA,
TA_RAS_BLOCK__VCN,
TA_RAS_BLOCK__JPEG,
+ TA_RAS_BLOCK__IH,
+ TA_RAS_BLOCK__MPIO,
+ TA_RAS_BLOCK__MMSCH,
TA_NUM_BLOCK_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 74f57b2d30a5..0e404c074975 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -85,7 +85,8 @@ bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_sta
return (amdgpu_ras_is_poison_mode_supported(adev) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
+ ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) ||
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison) == 1)));
}
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 7446ecc55714..3e176b4b7c69 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -1965,7 +1965,7 @@ static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
index 3ca8a417c6d8..8de4ccce5e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
@@ -64,9 +64,6 @@
#define HEADER_BARRIER 5
#define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0
-/* vm invalidation is only available for GC9.4.3/GC9.4.4/GC9.5.0 */
-#define SDMA_OP_VM_INVALIDATE 8
-#define SDMA_SUBOP_VM_INVALIDATE 4
/*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0
@@ -3334,72 +3331,5 @@
#define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift 0
#define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_COMPLETION_SIGNAL_63_32(x) (((x) & SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_mask) << SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift)
-/*
-** Definitions for SDMA_PKT_VM_INVALIDATION packet
-*/
-
-/*define for HEADER word*/
-/*define for op field*/
-#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0
-#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF
-#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0
-#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift)
-
-/*define for sub_op field*/
-#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0
-#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF
-#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8
-#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift)
-
-/*define for xcc0_eng_id field*/
-#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_offset 0
-#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask 0x0000001F
-#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift 16
-#define SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift)
-
-/*define for xcc1_eng_id field*/
-#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_offset 0
-#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask 0x0000001F
-#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift 21
-#define SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift)
-
-/*define for mmhub_eng_id field*/
-#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_offset 0
-#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask 0x0000001F
-#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift 26
-#define SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift)
-
-/*define for INVALIDATEREQ word*/
-/*define for invalidatereq field*/
-#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1
-#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF
-#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0
-#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) ((x & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift)
-
-/*define for ADDRESSRANGELO word*/
-/*define for addressrangelo field*/
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift)
-
-/*define for ADDRESSRANGEHI word*/
-/*define for invalidateack field*/
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift)
-
-/*define for addressrangehi field*/
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift)
-
-/*define for reserved field*/
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23
-#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift)
#endif /* __SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bae83a129b5f..d0d8ad5368c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8707,14 +8707,39 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
int offdelay;
if (acrtc_state) {
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
- IP_VERSION(3, 5, 0) ||
- acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED ||
- !(adev->flags & AMD_IS_APU)) {
- timing = &acrtc_state->stream->timing;
-
- /* at least 2 frames */
+ timing = &acrtc_state->stream->timing;
+
+ /*
+ * Depending on when the HW latching event of double-buffered
+ * registers happen relative to the PSR SDP deadline, and how
+ * bad the Panel clock has drifted since the last ALPM off
+ * event, there can be up to 3 frames of delay between sending
+ * the PSR exit cmd to DMUB fw, and when the panel starts
+ * displaying live frames.
+ *
+ * We can set:
+ *
+ * 20/100 * offdelay_ms = 3_frames_ms
+ * => offdelay_ms = 5 * 3_frames_ms
+ *
+ * This ensures that `3_frames_ms` will only be experienced as a
+ * 20% delay on top how long the display has been static, and
+ * thus make the delay less perceivable.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version <
+ DC_PSR_VERSION_UNSUPPORTED) {
+ offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
+ timing->v_total *
+ timing->h_total,
+ timing->pix_clk_100hz);
+ config.offdelay_ms = offdelay ?: 30;
+ } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+ IP_VERSION(3, 5, 0) ||
+ !(adev->flags & AMD_IS_APU)) {
+ /*
+ * Older HW and DGPU have issues with instant off;
+ * use a 2 frame offdelay.
+ */
offdelay = DIV64_U64_ROUND_UP((u64)20 *
timing->v_total *
timing->h_total,
@@ -8722,6 +8747,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
config.offdelay_ms = offdelay ?: 30;
} else {
+ /* offdelay_ms = 0 will never disable vblank */
+ config.offdelay_ms = 1;
config.disable_immediate = true;
}
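[Editor's note: worked numbers for the PSR offdelay formula above, assuming a standard 1080p60 timing (v_total = 1125, h_total = 2200, pix_clk_100hz = 1485000, i.e. 148.5 MHz):]

	/* frame_ms    = 10 * v_total * h_total / pix_clk_100hz
	 *             = 10 * 1125 * 2200 / 1485000  ~= 16.7 ms
	 * offdelay_ms = 5 * 3 * frame_ms            ~= 250 ms
	 *
	 * So the up-to-3-frame PSR exit latency (~50 ms here) is at most a
	 * 20% add-on over how long vblank interrupts stay enabled after the
	 * last update, which is the "less perceivable" goal in the comment.
	 */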
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index a3e93b2891f0..5198a079b463 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -597,7 +597,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -621,7 +621,7 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
}
static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -681,8 +681,8 @@ ret:
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
- .write = srm_data_write,
- .read = srm_data_read,
+ .write_new = srm_data_write,
+ .read_new = srm_data_read,
};
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
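The .read_new/.write_new switch above recurs in the drm_sysfs, i915 and lima hunks below; a minimal sketch of the transitional pattern, assuming the const-taking hooks in struct bin_attribute (foo_attr and its callback are hypothetical):

	static ssize_t foo_read(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
	{
		return 0; /* hypothetical empty attribute */
	}

	static const struct bin_attribute foo_attr = {
		.attr = { .name = "foo", .mode = 0444 },
		.read_new = foo_read, /* .read keeps the legacy non-const type */
	};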
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 70c39df62533..2061d43b92e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -590,11 +590,11 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
+ p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0;
p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
/* Dependent states. */
- p->out_states->state_array[i].dscclk_mhz = p->in_states->state_array[i].dscclk_mhz;
p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts;
p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz;
p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
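Deriving dscclk_mhz as max_dispclk_mhz / 3.0 is consistent with the usual DCN relationship DSCCLK = DISPCLK / 3 (an inference, not stated in the patch): a hypothetical max_dispclk_mhz of 1200 gives every state a 400 MHz DSC clock.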
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 926c08e790c1..846c9c51f2d9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -3033,7 +3033,11 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ } else {
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ }
} else {
if (dccg->funcs->enable_symclk_se)
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 8f5da0ded850..5489f3d431f6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -936,8 +936,11 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
-
- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ } else {
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ }
} else {
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9189dcb65188..2a9606118d89 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -341,6 +341,7 @@ enum pp_policy_soc_pstate {
#define MAX_CLKS 4
#define NUM_VCN 4
#define NUM_JPEG_ENG 32
+#define NUM_JPEG_ENG_V1 40
#define MAX_XCC 8
#define NUM_XCP 8
struct seq_file;
@@ -376,6 +377,20 @@ struct amdgpu_xcp_metrics_v1_1 {
uint64_t gfx_below_host_limit_acc[MAX_XCC];
};
+struct amdgpu_xcp_metrics_v1_2 {
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG_V1];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+ /* Total App Clock Counter Accumulated */
+ uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_thm_acc[MAX_XCC];
+ uint64_t gfx_low_utilization_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
+};
+
struct amd_pm_funcs {
/* export for dpm on ci and si */
int (*pre_set_power_state)(void *handle);
@@ -1090,6 +1105,105 @@ struct gpu_metrics_v1_7 {
uint32_t pcie_lc_perf_other_end_recovery;
};
+struct gpu_metrics_v1_8 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* VRAM max bandwidth (in GB/sec) at max memory clock */
+ uint64_t mem_max_bandwidth;
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /* PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /* PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size (KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* XGMI link status (active/inactive) */
+ uint16_t xgmi_link_status[NUM_XGMI_LINKS];
+
+ uint16_t padding;
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ /* Number of current partition */
+ uint16_t num_partition;
+
+ /* XCP metrics stats */
+ struct amdgpu_xcp_metrics_v1_2 xcp_stats[NUM_XCP];
+
+ /* PCIE other end recovery counter */
+ uint32_t pcie_lc_perf_other_end_recovery;
+};
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index a8fc0fa44db6..ba5c1237fcfe 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
- speed == 0 ||
+ (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return 0;
+ return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 379012494da5..56423aedf3fa 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
int result = 0;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- speed == 0 ||
+ (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return -1;
+ return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index a3331ffb2daf..1b1c88590156 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t tach_period, crystal_clock_freq;
int result = 0;
- if (!speed)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index f8ed45857878..d26f35119a12 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -127,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xF
+#define SMU_METRICS_TABLE_VERSION 0x10
// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
@@ -241,7 +241,10 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
//Total App Clock Counter
- uint64_t GfxclkBelowHostLimitAcc[8];
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
} MetricsTableV0_t;
// Metrics table for smu_v13_0_6 APUS
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 8aa61a9f7778..453952cdc353 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1267,6 +1267,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
+ if (!speed || speed > UINT_MAX/8)
+ return -EINVAL;
+
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
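The speed > UINT_MAX/8 guard added here and in the powerplay hwmgr paths above exists because the divisor below is computed in 32-bit arithmetic; a minimal illustration:

	uint32_t speed = 0x20000000;	/* just above UINT_MAX / 8 */
	uint32_t denom = 8 * speed;	/* wraps to 0 in 32-bit math */
	/* 60 * crystal_clock_freq * 10000 / denom then divides by zero */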
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 0915d6377613..ba5a9012dbd5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1226,7 +1226,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t tach_period;
int ret;
- if (!speed)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
ret = smu_v13_0_auto_fan_control(smu, 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 682646068000..c478b3be37af 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -109,7 +109,6 @@ enum smu_v13_0_6_caps {
SMU_CAP(OTHER_END_METRICS),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(PCIE_METRICS),
- SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(PER_INST_METRICS),
SMU_CAP(CTF_LIMIT),
@@ -325,8 +324,6 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
if (fw_ver >= 0x05550E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
- if (fw_ver >= 0x05551000)
- smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (fw_ver >= 0x05550B00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
@@ -342,7 +339,6 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(OTHER_END_METRICS),
- SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(PER_INST_METRICS) };
uint32_t fw_ver = smu->smc_fw_version;
@@ -387,8 +383,6 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
- if (fw_ver >= 0x04556F00)
- smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (fw_ver >= 0x04556A00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
} else {
@@ -408,8 +402,6 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
- if (pgm == 0 && fw_ver >= 0x557900)
- smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
@@ -2674,13 +2666,6 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
-
- if (smu_v13_0_6_cap_supported(
- smu, SMU_CAP(HST_LIMIT_METRICS)))
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
- SMUQ10_ROUND(GET_GPU_METRIC_FIELD
- (GfxclkBelowHostLimitAcc, version)
- [inst]);
idx++;
}
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index f7cfe1f35cae..82c2db972491 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -79,6 +79,7 @@
#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
+#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
@@ -1052,6 +1053,10 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
od_max_setting = overdrive_upperlimits->FanMinimumPwm;
break;
+ case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
+ od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
+ od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1330,6 +1335,24 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
min_value, max_value);
break;
+ case SMU_OD_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_ZERO_FAN_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanZeroRpmEnable);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
+ min_value, max_value);
+ break;
+
case SMU_OD_RANGE:
if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
@@ -2270,7 +2293,9 @@ static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
- OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
}
static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
@@ -2349,6 +2374,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
user_od_table_bak.OverDriveTable.FanTargetTemperature;
user_od_table->OverDriveTable.FanMinimumPwm =
user_od_table_bak.OverDriveTable.FanMinimumPwm;
+ user_od_table->OverDriveTable.FanZeroRpmEnable =
+ user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
}
smu_v14_0_2_set_supported_od_feature_mask(smu);
@@ -2396,6 +2423,11 @@ static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long inp
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ od_table->OverDriveTable.FanZeroRpmEnable =
+ boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
case PP_OD_EDIT_ACOUSTIC_LIMIT:
od_table->OverDriveTable.AcousticLimitRpmThreshold =
boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
@@ -2678,6 +2710,27 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+ dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "zero RPM enable setting (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanZeroRpmEnable = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size == 1) {
ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index d834d134ad2b..80eb1a03b3ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1083,6 +1083,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 7):
structure_size = sizeof(struct gpu_metrics_v1_7);
break;
+ case METRICS_VERSION(1, 8):
+ structure_size = sizeof(struct gpu_metrics_v1_8);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
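A sketch of how a producer would stamp the new v1.8 table through this helper (fill_metrics_v1_8 and the placeholder values are hypothetical):

	static void fill_metrics_v1_8(void *table)
	{
		struct gpu_metrics_v1_8 *gpu_metrics = table;

		/* fills common_header with format/content revision 1.8 */
		smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
		gpu_metrics->temperature_hotspot = 0;
		gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
	}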
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index d20f1646dac2..09a1be234f71 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -91,12 +91,13 @@ config DRM_FSL_LDB
Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
config DRM_I2C_NXP_TDA998X
- tristate "NXP Semiconductors TDA998X HDMI encoder"
- default m if DRM_TILCDC
- select CEC_CORE if CEC_NOTIFIER
- select SND_SOC_HDMI_CODEC if SND_SOC
- help
- Support for NXP Semiconductors TDA998X HDMI encoders.
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ select CEC_CORE if CEC_NOTIFIER
+ select DRM_KMS_HELPER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
config DRM_ITE_IT6263
tristate "ITE IT6263 LVDS/HDMI bridge"
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index ebc758c72891..20658258fb51 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -1763,7 +1763,7 @@ static void tda998x_destroy(struct device *dev)
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
- del_timer_sync(&priv->edid_delay_timer);
+ timer_delete_sync(&priv->edid_delay_timer);
cancel_work_sync(&priv->detect_work);
i2c_unregister_device(priv->cec);
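This and the remaining del_timer*() conversions in this series are mechanical renames with unchanged semantics:

	/* del_timer(t)      -> timer_delete(t)      (handler may still run)
	 * del_timer_sync(t) -> timer_delete_sync(t) (waits for the handler)
	 * both return nonzero if the timer was pending when deactivated */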
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 2451c816edd5..38431e8360e7 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1416,13 +1416,13 @@ map_pages:
goto err_unmap;
}
zdd = page->zone_device_data;
- if (pagemap != page->pgmap) {
+ if (pagemap != page_pgmap(page)) {
if (i > 0) {
err = -EOPNOTSUPP;
goto err_unmap;
}
- pagemap = page->pgmap;
+ pagemap = page_pgmap(page);
dpagemap = zdd->devmem_allocation->dpagemap;
if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
/*
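page_pgmap() replaces direct page->pgmap dereferences here and in the nouveau and xe_svm hunks below; a minimal sketch of the accessor, assuming <linux/memremap.h> provides the helper (same_pagemap() is hypothetical):

	#include <linux/memremap.h>

	static bool same_pagemap(struct page *a, struct page *b)
	{
		return page_pgmap(a) == page_pgmap(b);
	}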
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index ab42a2b1567d..b47ea25fdfaa 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -486,11 +486,6 @@ static void drm_panic_qr_exit(void)
stream.workspace = NULL;
}
-extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
-
-extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
- u8 *tmp, size_t tmp_size);
-
static int drm_panic_get_qr_code_url(u8 **qr_image)
{
struct kmsg_dump_iter iter;
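With #[export] on the Rust side, the local extern declarations removed above presumably move to a header visible to both the C caller and the exporting macro's signature check, e.g. (header location is an assumption):

	size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
	u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len,
				 size_t data_size, u8 *tmp, size_t tmp_size);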
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 5d21f6d10bcb..f2a99681b998 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -27,7 +27,7 @@
//! * <https://github.com/erwanvivien/fast_qr>
//! * <https://github.com/bjguillot/qr>
-use kernel::str::CStr;
+use kernel::{prelude::*, str::CStr};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
struct Version(usize);
@@ -891,7 +891,7 @@ impl QrImage<'_> {
/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
///
/// They must remain valid for the duration of the function call.
-#[no_mangle]
+#[export]
pub unsafe extern "C" fn drm_panic_qr_generate(
url: *const kernel::ffi::c_char,
data: *mut u8,
@@ -942,8 +942,13 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
/// * If `url_len` > 0, remove the 2 segments header/length and also count the
/// conversion to numeric segments.
/// * If `url_len` = 0, only removes 3 bytes for 1 binary segment.
-#[no_mangle]
-pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+///
+/// # Safety
+///
+/// Always safe to call.
+// Required to be unsafe due to the `#[export]` annotation.
+#[export]
+pub unsafe extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
#[expect(clippy::manual_range_contains)]
if version < 1 || version > 40 {
return 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fb3bbb6adcd1..60c1f26edb6f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -261,7 +261,7 @@ static ssize_t enabled_show(struct device *device,
}
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *connector_dev = kobj_to_dev(kobj);
@@ -315,21 +315,21 @@ static struct attribute *connector_dev_attrs[] = {
NULL
};
-static struct bin_attribute edid_attr = {
+static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
.size = 0,
- .read = edid_show,
+ .read_new = edid_show,
};
-static struct bin_attribute *connector_bin_attrs[] = {
+static const struct bin_attribute *const connector_bin_attrs[] = {
&edid_attr,
NULL
};
static const struct attribute_group connector_dev_group = {
.attrs = connector_dev_attrs,
- .bin_attrs = connector_bin_attrs,
+ .bin_attrs_new = connector_bin_attrs,
};
static const struct attribute_group *connector_dev_groups[] = {
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 94e45ed6869d..78958ddf8485 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -508,7 +508,7 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
drm_core_check_feature(dev, DRIVER_MODESET));
drm_vblank_destroy_worker(vblank);
- del_timer_sync(&vblank->disable_timer);
+ timer_delete_sync(&vblank->disable_timer);
}
/**
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index fd388b1dbe68..08cf79a62025 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -427,7 +427,7 @@ static void vidi_unbind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
- del_timer_sync(&ctx->timer);
+ timer_delete_sync(&ctx->timer);
}
static const struct component_ops vidi_component_ops = {
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index e163649816d5..77cfcf37ddd2 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -254,7 +254,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
usb_sg_wait(&ctx.sgr);
- if (!del_timer_sync(&ctx.timer))
+ if (!timer_delete_sync(&ctx.timer))
ret = -ETIMEDOUT;
else if (ctx.sgr.status < 0)
ret = ctx.sgr.status;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index ca2c8c438f02..89bad3a2b01a 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_FBDEV_H__
#define __INTEL_FBDEV_H__
+#include <linux/types.h>
+
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 2d0de1c63308..621e97943542 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -2314,6 +2314,7 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
static int
dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
@@ -2323,7 +2324,9 @@ dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
u32 dsc_prefill_latency = 0;
- if (!crtc_state->dsc.compression_enable || !num_scaler_users)
+ if (!crtc_state->dsc.compression_enable ||
+ !num_scaler_users ||
+ num_scaler_users > crtc->num_scalers)
return dsc_prefill_latency;
dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4a80ffa1b962..03baa7fa0a27 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2502,7 +2502,7 @@ static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
ENGINE_TRACE(engine, "semaphore yield: %08x\n",
engine->execlists.yield);
- if (del_timer(&engine->execlists.timer))
+ if (timer_delete(&engine->execlists.timer))
tasklet = true;
}
@@ -3370,8 +3370,8 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
static void execlists_shutdown(struct intel_engine_cs *engine)
{
/* Synchronise with residual timers and any softirq they raise */
- del_timer_sync(&engine->execlists.timer);
- del_timer_sync(&engine->execlists.preempt);
+ timer_delete_sync(&engine->execlists.timer);
+ timer_delete_sync(&engine->execlists.preempt);
tasklet_kill(&engine->sched_engine->tasklet);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 2cfaedb04876..64e9317f58fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -161,7 +161,7 @@ static void rps_start_timer(struct intel_rps *rps)
static void rps_stop_timer(struct intel_rps *rps)
{
- del_timer_sync(&rps->timer);
+ timer_delete_sync(&rps->timer);
rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
cancel_work_sync(&rps->work);
}
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index c0637bf799a3..64315b714743 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -297,7 +297,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
struct i915_request *rq;
unsigned long flags;
- del_timer_sync(&mock->hw_delay);
+ timer_delete_sync(&mock->hw_delay);
spin_lock_irqsave(&engine->sched_engine->lock, flags);
@@ -432,7 +432,7 @@ void mock_engine_flush(struct intel_engine_cs *engine)
container_of(engine, typeof(*mock), base);
struct i915_request *request, *rn;
- del_timer_sync(&mock->hw_delay);
+ timer_delete_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index d7717de17ecc..0454eb1814bb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1198,7 +1198,7 @@ static int live_timeslice_rewind(void *arg)
ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
/* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
+ timer_delete(&engine->execlists.timer);
tasklet_hi_schedule(&engine->sched_engine->tasklet);
intel_engine_flush_submission(engine);
}
@@ -2357,7 +2357,7 @@ static int __cancel_fail(struct live_preempt_cancel *arg)
/* force preempt reset [failure] */
while (!engine->execlists.pending[0])
intel_engine_flush_submission(engine);
- del_timer_sync(&engine->execlists.preempt);
+ timer_delete_sync(&engine->execlists.preempt);
intel_engine_flush_submission(engine);
cancel_reset_timeout(engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 1bf7b88d9a9d..401bee030dbc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -660,7 +660,7 @@ static int live_emit_pte_full_ring(void *arg)
out_rq:
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
- del_timer_sync(&st.timer);
+ timer_delete_sync(&st.timer);
destroy_timer_on_stack(&st.timer);
out_unpin:
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 819ab933bb10..a6613eed3398 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -2490,7 +2490,7 @@ void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
}
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
@@ -2526,7 +2526,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
@@ -2542,8 +2542,8 @@ static const struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
- .read = error_state_read,
- .write = error_state_write,
+ .read_new = error_state_read,
+ .write_new = error_state_write,
};
void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h
index 8f81b7603d37..317075d0da4e 100644
--- a/drivers/gpu/drm/i915/i915_iosf_mbi.h
+++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h
@@ -31,12 +31,6 @@ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
{
return 0;
}
-
-static inline
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
-{
- return 0;
-}
#endif
#endif /* __I915_IOSF_MBI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 8775beab9cb8..f936e8f1f129 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -60,7 +60,7 @@ static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
@@ -88,7 +88,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
@@ -140,8 +140,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
static const struct bin_attribute dpf_attrs = {
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
- .read = i915_l3_read,
- .write = i915_l3_write,
+ .read_new = i915_l3_read,
+ .write_new = i915_l3_write,
.mmap = NULL,
.private = (void *)0
};
@@ -149,8 +149,8 @@ static const struct bin_attribute dpf_attrs = {
static const struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
- .read = i915_l3_read,
- .write = i915_l3_write,
+ .read_new = i915_l3_read,
+ .write_new = i915_l3_write,
.mmap = NULL,
.private = (void *)1
};
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 2576f8f6c0f6..b60c28fbd207 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -52,7 +52,7 @@ void cancel_timer(struct timer_list *t)
if (!timer_active(t))
return;
- del_timer(t);
+ timer_delete(t);
WRITE_ONCE(t->expires, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 87f246047312..07e81be4d392 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -163,7 +163,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
unsigned long flags;
if (!timeout) {
- if (del_timer_sync(&wf->timer))
+ if (timer_delete_sync(&wf->timer))
wakeref_auto_timeout(&wf->timer);
return;
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index bf2752cc1e0b..d5ecc68155da 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -74,7 +74,7 @@ void timed_fence_init(struct timed_fence *tf, unsigned long expires)
void timed_fence_fini(struct timed_fence *tf)
{
- if (del_timer_sync(&tf->timer))
+ if (timer_delete_sync(&tf->timer))
i915_sw_fence_commit(&tf->fence);
destroy_timer_on_stack(&tf->timer);
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 2067c5b65c57..11ace5cebf4c 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -310,7 +310,7 @@ static bool lima_read_block(struct lima_block_reader *reader,
}
static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -336,7 +336,7 @@ static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
}
static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -362,8 +362,8 @@ static const struct bin_attribute lima_error_state_attr = {
.attr.name = "error",
.attr.mode = 0600,
.size = 0,
- .read = lima_error_state_read,
- .write = lima_error_state_write,
+ .read_new = lima_error_state_read,
+ .write_new = lima_error_state_write,
};
static int lima_pdev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index ccdc57cef3ea..fed3307d3374 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2847,7 +2847,7 @@ static void mtk_dp_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP)
- del_timer_sync(&mtk_dp->debounce_timer);
+ timer_delete_sync(&mtk_dp->debounce_timer);
platform_device_unregister(mtk_dp->phy_dev);
if (mtk_dp->audio_pdev)
platform_device_unregister(mtk_dp->audio_pdev);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 71dca78cd7a5..650e5bac225f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1253,7 +1253,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
/* Turn off the hangcheck timer to keep it from bothering us */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 0469fea55010..36f72c43eae8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -182,7 +182,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
return;
/* Delete the preemption watchdog timer */
- del_timer(&a5xx_gpu->preempt_timer);
+ timer_delete(&a5xx_gpu->preempt_timer);
/*
* The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 38c94915d4c9..c8711938a5f4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -28,7 +28,7 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
gmu->hung = true;
/* Turn off the hangcheck timer while we are resetting */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
/* Queue the GPU handler because we need to treat this as a recovery */
kthread_queue_work(gpu->worker, &gpu->recover_work);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 1820c167fcee..06465bc2d0b4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1706,7 +1706,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
/* Turn off the hangcheck timer to keep it from bothering us */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
@@ -1726,7 +1726,7 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
*/
if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING |
A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) {
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 2fd4e39f618f..9b5e27d2373c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -146,7 +146,7 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
return;
/* Delete the preemption watchdog timer */
- del_timer(&a6xx_gpu->preempt_timer);
+ timer_delete(&a6xx_gpu->preempt_timer);
/*
* The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7156cda07b03..26db1f4b5fb9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -292,7 +292,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
if (do_devcoredump) {
/* Turn off the hangcheck timer to keep it from bothering us */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
gpu->fault_info.ttbr0 = info->ttbr0;
gpu->fault_info.iova = iova;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 284e69bb47c1..8610bbf2b87c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1410,7 +1410,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
- del_timer_sync(&dpu_enc->frame_done_timer);
+ timer_delete_sync(&dpu_enc->frame_done_timer);
}
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
@@ -1582,7 +1582,7 @@ void dpu_encoder_frame_done_callback(
if (!dpu_enc->frame_busy_mask[0]) {
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
- del_timer(&dpu_enc->frame_done_timer);
+ timer_delete(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
DPU_ENC_RC_EVENT_FRAME_DONE);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1a072568cef6..61d0f411ef84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -88,7 +88,8 @@ struct nouveau_dmem {
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+ return container_of(page_pgmap(page), struct nouveau_dmem_chunk,
+ pagemap);
}
static struct nouveau_drm *page_to_drm(struct page *page)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 825c867eba7c..e12e2596ed84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -610,10 +610,9 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
notifier_seq = mmu_interval_read_begin(&notifier->notifier);
mmap_read_lock(mm);
- ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
- &page, drm->dev);
+ page = make_device_exclusive(mm, start, drm->dev, &folio);
mmap_read_unlock(mm);
- if (ret <= 0 || !page) {
+ if (IS_ERR(page)) {
ret = -EINVAL;
goto out;
}
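The replacement API works on a single page and reports failure via ERR_PTR rather than a count; a sketch of the new calling convention (the cleanup comment is an assumption based on the API's documented contract):

	struct folio *folio;
	struct page *page;

	page = make_device_exclusive(mm, start, drm->dev, &folio);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* on success the owning folio comes back locked with a reference;
	 * unlock and drop it once the exclusive entry has been consumed */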
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 59d20eb8a7e0..9b9cc593790c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -452,7 +452,7 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
#ifdef DSI_CATCH_MISSING_TE
if (irqstatus & DSI_IRQ_TE_TRIGGER)
- del_timer(&dsi->te_timer);
+ timer_delete(&dsi->te_timer);
#endif
/* make a copy and unlock, so that isrs can unregister
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index fb450b6a4d44..7125773889f1 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -1043,7 +1043,7 @@ static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
- del_timer(&vc4->bo_cache.time_timer);
+ timer_delete(&vc4->bo_cache.time_timer);
cancel_work_sync(&vc4->bo_cache.time_work);
vc4_bo_cache_purge(dev);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index e15754178395..37bb1fb58cf9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -49,7 +49,7 @@ static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
- del_timer_sync(&fence->timer);
+ timer_delete_sync(&fence->timer);
dma_fence_free(&fence->base);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 1f15990d3934..1d9a42cbc88f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -289,7 +289,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
return -EINVAL;
}
-STACK_FRAME_NON_STANDARD(vmw_send_msg);
+STACK_FRAME_NON_STANDARD_FP(vmw_send_msg);
/**
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 7d7995196702..5c2f459a2925 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -53,7 +53,7 @@ config DRM_XE
config DRM_XE_DISPLAY
bool "Enable display support"
depends on DRM_XE && DRM_XE=m && HAS_IOPORT
- select FB_IOMEM_HELPERS
+ select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select I2C
select I2C_ALGOBIT
default y
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 4f372dc2cb89..fb8ec317b6ee 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -130,6 +130,10 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
+#define RING_IDLEDLY(base) XE_REG((base) + 0x23c)
+#define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31)
+#define IDLE_DELAY REG_GENMASK(20, 0)
+
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_PXP_ENABLE REG_BIT(10)
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 5d79b439dd62..00191227bc95 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -53,6 +53,7 @@
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
+#include "xe_survivability_mode.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
@@ -705,8 +706,20 @@ int xe_device_probe_early(struct xe_device *xe)
sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err)
- return err;
+ if (err) {
+ int save_err = err;
+
+ /*
+ * Try to leave the device in survivability mode if possible,
+ * but still return the previous error for error propagation.
+ */
+ err = xe_survivability_mode_enable(xe);
+ if (err)
+ return err;
+
+ return save_err;
+ }
err = wait_for_lmem_ready(xe);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 88a92baf5c95..f2bb9168967c 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -222,13 +222,7 @@ int xe_eu_stall_init(struct xe_gt *gt)
goto exit_free;
}
- ret = devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt);
- if (ret)
- goto exit_destroy;
-
- return 0;
-exit_destroy:
- destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq);
+ return devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt);
exit_free:
mutex_destroy(&gt->eu_stall->stream_lock);
kfree(gt->eu_stall);
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 9fbed1a2fcc6..788f56b066b6 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -297,7 +297,7 @@ err:
void xe_execlist_port_destroy(struct xe_execlist_port *port)
{
- del_timer(&port->irq_fail);
+ timer_delete(&port->irq_fail);
/* Prevent an interrupt while we're destroying */
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 2a958c92d8ea..4f011d1573c6 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -16,35 +16,47 @@
#include "xe_macros.h"
#include "xe_mmio.h"
-static u32 get_crystal_clock_freq(u32 rpm_config_reg)
+#define f19_2_mhz 19200000
+#define f24_mhz 24000000
+#define f25_mhz 25000000
+#define f38_4_mhz 38400000
+#define ts_base_83 83333
+#define ts_base_52 52083
+#define ts_base_80 80000
+
+static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq,
+ u32 *timestamp_base)
{
- const u32 f19_2_mhz = 19200000;
- const u32 f24_mhz = 24000000;
- const u32 f25_mhz = 25000000;
- const u32 f38_4_mhz = 38400000;
u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK,
rpm_config_reg);
switch (crystal_clock) {
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
+ *freq = f24_mhz;
+ *timestamp_base = ts_base_83;
+ return;
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
+ *freq = f19_2_mhz;
+ *timestamp_base = ts_base_52;
+ return;
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
- return f38_4_mhz;
+ *freq = f38_4_mhz;
+ *timestamp_base = ts_base_52;
+ return;
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
- return f25_mhz;
+ *freq = f25_mhz;
+ *timestamp_base = ts_base_80;
+ return;
default:
- XE_WARN_ON("NOT_POSSIBLE");
- return 0;
+ xe_gt_warn(gt, "Invalid crystal clock frequency: %u", crystal_clock);
+ *freq = 0;
+ *timestamp_base = 0;
+ return;
}
}
-int xe_gt_clock_init(struct xe_gt *gt)
+static void check_ctc_mode(struct xe_gt *gt)
{
- u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
- u32 freq = 0;
-
/*
* CTC_MODE[0] = 1 is definitely not supported for Xe2 and later
* platforms. In theory it could be a valid setting for pre-Xe2
@@ -57,8 +69,18 @@ int xe_gt_clock_init(struct xe_gt *gt)
*/
if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC)
xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n");
+}
+
+int xe_gt_clock_init(struct xe_gt *gt)
+{
+ u32 freq;
+ u32 c0;
+
+ if (!IS_SRIOV_VF(gt_to_xe(gt)))
+ check_ctc_mode(gt);
- freq = get_crystal_clock_freq(c0);
+ c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
+ read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base);
/*
* Now figure out how the command stream's timestamp
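Reading the ts_base_* constants as command-streamer timestamp periods in picoseconds fits each pairing above (an inference, not stated in the patch):

	/* 19.2 MHz: 10^12 / 19.2e6       = 52083 ps -> ts_base_52
	 * 38.4 MHz: 2 * (10^12 / 38.4e6) = 52083 ps -> ts_base_52
	 * 25.0 MHz: 2 * (10^12 / 25.0e6) = 80000 ps -> ts_base_80
	 * 24.0 MHz: 2 * (10^12 / 24.0e6) = 83333 ps -> ts_base_83
	 * i.e. one timestamp tick spans one or two crystal cycles */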
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index e3cfb026ac88..7def0959da35 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -121,6 +121,8 @@ struct xe_gt {
enum xe_gt_type type;
/** @info.reference_clock: clock frequency */
u32 reference_clock;
+ /** @info.timestamp_base: GT timestamp base */
+ u32 timestamp_base;
/**
* @info.engine_mask: mask of engines present on GT. Some of
* them may be reserved in runtime and not available for user.
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 223b95de388c..8c05fd30b7df 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -8,7 +8,9 @@
#include <linux/nospec.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>
+#include <generated/xe_wa_oob.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@@ -21,6 +23,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
+#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
@@ -564,6 +567,33 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_reg_whitelist_process_engine(hwe);
}
+static void adjust_idledly(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ u32 idledly, maxcnt;
+ u32 idledly_units_ps = 8 * gt->info.timestamp_base;
+ u32 maxcnt_units_ns = 640;
+ bool inhibit_switch = false;
+
+ if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
+ idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
+ maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
+
+ inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED;
+ idledly = REG_FIELD_GET(IDLE_DELAY, idledly);
+ idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000);
+ maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt);
+ maxcnt *= maxcnt_units_ns;
+
+ if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) {
+ idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns),
+ idledly_units_ps);
+ idledly = DIV_ROUND_CLOSEST(idledly, 1000);
+ xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
+ }
+ }
+}
+
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
enum xe_hw_engine_id id)
{
@@ -604,6 +634,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
+ /* Ensure IDLEDLY is lower than MAXCNT */
+ adjust_idledly(hwe);
+
return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
err_hwsp:
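A unit check of the IDLEDLY/MAXCNT comparison above, with hypothetical field values on ts_base_52 hardware:

	/* idledly_units_ps = 8 * 52083 = 416664 ps (~416.7 ns per tick)
	 * IDLE_DELAY = 100     -> ~41666 ns idle delay
	 * IDLE_WAIT_TIME = 50  -> 50 * 640 = 32000 ns max count
	 * 41666 >= 32000 trips the WARN and IDLEDLY is rewritten down */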
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index da9679c8cf26..818f023166d5 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -803,16 +803,14 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
-
- /*
- * In Boot Survivability mode, no drm card is exposed and driver is
- * loaded with bare minimum to allow for firmware to be flashed through
- * mei. If early probe fails, check if survivability mode is flagged by
- * HW to be enabled. In that case enable it and return success.
- */
if (err) {
- if (xe_survivability_mode_required(xe) &&
- xe_survivability_mode_enable(xe))
+ /*
+ * In Boot Survivability mode, no drm card is exposed and the
+ * driver is loaded with the bare minimum to allow firmware to
+ * be flashed through mei. If early probe failed but managed to
+ * enable survivability mode, return success.
+ */
+ if (xe_survivability_mode_is_enabled(xe))
return 0;
return err;
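Taken together with the xe_device.c hunk earlier, the early-probe error path now reduces to (sketch, condensed from the two hunks):

	err = xe_device_probe_early(xe);  /* internally attempts
					   * survivability on pcode failure */
	if (err) {
		if (xe_survivability_mode_is_enabled(xe))
			return 0;  /* no drm card; flash firmware via mei */
		return err;
	}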
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index d939ce70e6fa..cb813b337fd3 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -155,13 +155,21 @@ static int enable_survivability_mode(struct pci_dev *pdev)
if (ret)
return ret;
+ /* Make sure xe_heci_gsc_init() knows about survivability mode */
+ survivability->mode = true;
+
ret = xe_heci_gsc_init(xe);
- if (ret)
+ if (ret) {
+ /*
+ * But if it fails, the device can't enter survivability mode,
+ * so move it back for correct error handling.
+ */
+ survivability->mode = false;
return ret;
+ }
xe_vsec_init(xe);
- survivability->mode = true;
dev_err(dev, "In Survivability Mode\n");
return 0;
@@ -178,15 +186,16 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe)
return xe->survivability.mode;
}
-/**
- * xe_survivability_mode_required - checks if survivability mode is required
- * @xe: xe device instance
+/*
+ * survivability_mode_requested - check whether survivability mode can be
+ * enabled and was requested by firmware
*
- * This function reads the boot status from Pcode
+ * This function reads the boot status from Pcode.
*
- * Return: true if boot status indicates failure, false otherwise
+ * Return: true if platform support is available and boot status indicates
+ * failure, false otherwise.
*/
-bool xe_survivability_mode_required(struct xe_device *xe)
+static bool survivability_mode_requested(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -208,7 +217,8 @@ bool xe_survivability_mode_required(struct xe_device *xe)
*
* Initialize survivability information and enable survivability mode
*
- * Return: 0 for success, negative error code otherwise.
+ * Return: 0 if survivability mode is enabled or not requested; negative error
+ * code otherwise.
*/
int xe_survivability_mode_enable(struct xe_device *xe)
{
@@ -216,6 +226,9 @@ int xe_survivability_mode_enable(struct xe_device *xe)
struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ if (!survivability_mode_requested(xe))
+ return 0;
+
survivability->size = MAX_SCRATCH_MMIO;
info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index f4df5f9025ce..d7e64885570d 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -12,6 +12,5 @@ struct xe_device;
int xe_survivability_mode_enable(struct xe_device *xe);
bool xe_survivability_mode_is_enabled(struct xe_device *xe);
-bool xe_survivability_mode_required(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 516898e99b26..3e829c87d7b4 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -341,7 +341,7 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
static struct xe_vram_region *page_to_vr(struct page *page)
{
- return container_of(page->pgmap, struct xe_vram_region, pagemap);
+ return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index a25afb757f70..24f644c0a673 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -622,6 +622,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
},
+ { XE_RTP_NAME("16023105232"),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 3000), OR,
+ GRAPHICS_VERSION_RANGE(2001, 3001)),
+ XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index e0c5fa460487..0c738af24f7c 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -53,3 +53,5 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION_RANGE(1270, 1274)
1508761755 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
+16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
+ MEDIA_VERSION_RANGE(1301, 3000)
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 63c19f140fbd..a08fb6599267 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{bindings, c_str, pci, prelude::*};
+use kernel::{bindings, c_str, device::Core, pci, prelude::*};
use crate::gpu::Gpu;
@@ -27,7 +27,7 @@ impl pci::Driver for NovaCore {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &mut pci::Device, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
pdev.enable_device_mem()?;
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index 50aefb150b0b..b1a25b86ef17 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -35,7 +35,7 @@ pub(crate) struct Boot0(u32);
impl Boot0 {
#[inline]
pub(crate) fn read(bar: &Bar0) -> Self {
- Self(bar.readl(BOOT0_OFFSET))
+ Self(bar.read32(BOOT0_OFFSET))
}
#[inline]