From 78347b651aa5be8b48462c48fee7e8302dcc5819 Mon Sep 17 00:00:00 2001
From: YiPeng Chai
Date: Mon, 1 Jul 2024 14:43:17 +0800
Subject: drm/amdgpu: sysfs node disable query error count during gpu reset

Disable the sysfs error-count query while a GPU reset is in progress.

Signed-off-by: YiPeng Chai
Reviewed-by: Stanley.Yang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/aldebaran.c     | 2 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c    | 3 +++
 3 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index d0a8da67dc2a..b0f95a7649bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -316,8 +316,6 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.late_initialized = true;
 	}
 
-	amdgpu_ras_set_error_query_ready(adev, true);
-
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b27336a05aae..bcacf2e35eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3142,7 +3142,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 			return r;
 	}
 
-	amdgpu_ras_set_error_query_ready(adev, true);
+	if (!amdgpu_in_reset(adev))
+		amdgpu_ras_set_error_query_ready(adev, true);
 
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 6d1f974e2987..53b5ac2d7bed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1295,6 +1295,9 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
 		.head = obj->head,
 	};
 
+	if (!amdgpu_ras_get_error_query_ready(obj->adev))
+		return sysfs_emit(buf, "Query currently inaccessible\n");
+
 	if (amdgpu_ras_query_error_status(obj->adev, &info))
 		return -EINVAL;
--
cgit v1.2.3


From 064d92436b6924937ef414894d9174fa4465f788 Mon Sep 17 00:00:00 2001
From: Tim Huang
Date: Thu, 13 Jun 2024 10:34:13 +0800
Subject: drm/amd/pm: avoid loading smu firmware for APUs

Certain call paths still load the SMU firmware for APUs; that load
needs to be skipped.

Signed-off-by: Tim Huang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 +++-----
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +++-----
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 8 +++-----
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c    | 2 +-
 4 files changed, 10 insertions(+), 16 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a52c72739b40..4bc2abe97087 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7324,11 +7324,9 @@ static int gfx_v10_0_hw_init(void *handle)
 		 * loaded firstly, so in direct type, it has to load smc ucode
 		 * here before rlc.
 		 */
-		if (!(adev->flags & AMD_IS_APU)) {
-			r = amdgpu_pm_load_smu_firmware(adev, NULL);
-			if (r)
-				return r;
-		}
+		r = amdgpu_pm_load_smu_firmware(adev, NULL);
+		if (r)
+			return r;
 
 		gfx_v10_0_disable_gpa_mode(adev);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 6228dd0450a7..4f57cf3dac48 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4558,11 +4558,9 @@ static int gfx_v11_0_hw_init(void *handle)
 		 * loaded firstly, so in direct type, it has to load smc ucode
 		 * here before rlc.
 		 */
-		if (!(adev->flags & AMD_IS_APU)) {
-			r = amdgpu_pm_load_smu_firmware(adev, NULL);
-			if (r)
-				return r;
-		}
+		r = amdgpu_pm_load_smu_firmware(adev, NULL);
+		if (r)
+			return r;
 	}
 
 	gfx_v11_0_constants_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index ccb26f78252a..40edda2c3003 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3306,11 +3306,9 @@ static int gfx_v12_0_hw_init(void *handle)
 		 * loaded firstly, so in direct type, it has to load smc ucode
 		 * here before rlc.
 		 */
-		if (!(adev->flags & AMD_IS_APU)) {
-			r = amdgpu_pm_load_smu_firmware(adev, NULL);
-			if (r)
-				return r;
-		}
+		r = amdgpu_pm_load_smu_firmware(adev, NULL);
+		if (r)
+			return r;
 	}
 
 	gfx_v12_0_constants_init(adev);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index b3b5e7b74c85..a1b8a82d77cf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -618,7 +618,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int r = 0;
 
-	if (!pp_funcs || !pp_funcs->load_firmware)
+	if (!pp_funcs || !pp_funcs->load_firmware || adev->flags & AMD_IS_APU)
 		return 0;
 
 	mutex_lock(&adev->pm.mutex);
--
cgit v1.2.3


From ffcc5745ed56d5c4e7ddf2774ab7ad26e93143d2 Mon Sep 17 00:00:00 2001
From: Frank Min
Date: Thu, 20 Jun 2024 13:57:55 +0800
Subject: drm/amdgpu: update gfxhub client id for gfx12

Update the gfxhub client id table for gfx12.

Signed-off-by: Frank Min
Acked-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c
index 7ea64f1e1e48..7609b9cecae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c
@@ -35,7 +35,27 @@
 #define regGRBM_GFX_INDEX_DEFAULT 0xe0000000
 
 static const char *gfxhub_client_ids[] = {
-	/* TODO */
+	"CB",
+	"DB",
+	"GE1",
+	"GE2",
+	"CPF",
+	"CPC",
+	"CPG",
+	"RLC",
+	"TCP",
+	"SQC (inst)",
+	"SQC (data)",
+	"SQG/PC/SC",
+	"Reserved",
+	"SDMA0",
+	"SDMA1",
+	"GCR",
+	"Reserved",
+	"Reserved",
+	"WGS",
+	"DSM",
+	"PA"
 };
 
 static uint32_t gfxhub_v12_0_get_invalidate_req(unsigned int vmid,
--
cgit v1.2.3


From fbbbb62112a8788f76e28bfb7e822bf33ab1075e Mon Sep 17 00:00:00 2001
From: Sunil Khatri
Date: Thu, 27 Jun 2024 12:22:06 +0530
Subject: drm/amdgpu: add gfx12 register support in ipdump

Add general registers of gfx12 in ipdump for devcoredump support.
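As an illustrative sketch (not part of the diff below), each table
entry pairs a register offset with a printable name, and the dump
routine added later in this patch resolves each entry back to an
absolute offset before reading it:

	/* Illustrative only; mirrors the dump loop added below. */
	for (i = 0; i < ARRAY_SIZE(gc_reg_list_12_0); i++)
		adev->gfx.ip_dump_core[i] =
			RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i]));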
Reviewed-by: Alex Deucher
Signed-off-by: Sunil Khatri
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 101 +++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 40edda2c3003..d5a6788e7731 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -63,6 +63,73 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
 MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
 MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
 
+static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
+	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
+	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
+	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
+	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_LO32),
+	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_HI32),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
+
+	/* cp header registers */
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+	/* SE status registers */
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
+	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -1129,6 +1196,20 @@ static int gfx_v12_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
+{
+	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
+	uint32_t *ptr;
+
+	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
+	if (ptr == NULL) {
+		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
+		adev->gfx.ip_dump_core = NULL;
+	} else {
+		adev->gfx.ip_dump_core = ptr;
+	}
+}
+
 static int gfx_v12_0_sw_init(void *handle)
 {
 	int i, j, k, r, ring_id = 0;
@@ -1261,6 +1342,8 @@ static int gfx_v12_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	gfx_v12_0_alloc_ip_dump(adev);
+
 	return 0;
 }
 
@@ -1320,6 +1403,8 @@ static int gfx_v12_0_sw_fini(void *handle)
 
 	gfx_v12_0_free_microcode(adev);
 
+	kfree(adev->gfx.ip_dump_core);
+
 	return 0;
 }
 
@@ -4671,6 +4756,21 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
 }
 
+static void gfx_v12_ip_dump(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	uint32_t i;
+	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
+
+	if (!adev->gfx.ip_dump_core)
+		return;
+
+	amdgpu_gfx_off_ctrl(adev, false);
+	for (i = 0; i < reg_count; i++)
+		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i]));
+	amdgpu_gfx_off_ctrl(adev, true);
+}
+
 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
 	.name = "gfx_v12_0",
 	.early_init = gfx_v12_0_early_init,
@@ -4686,6 +4786,7 @@ static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
 	.set_clockgating_state = gfx_v12_0_set_clockgating_state,
 	.set_powergating_state = gfx_v12_0_set_powergating_state,
 	.get_clockgating_state = gfx_v12_0_get_clockgating_state,
+	.dump_ip_state = gfx_v12_ip_dump,
 };
 
 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
--
cgit v1.2.3


From 2262acad0ae911cd45b6cb976f803a74db9e9ea7 Mon Sep 17 00:00:00 2001
From: Sunil Khatri
Date: Thu, 27 Jun 2024 12:25:46 +0530
Subject: drm/amdgpu: add print support for gfx12 ipdump

Add print support for the gfx12 ipdump so that devcoredump can
trigger it and include the captured registers in its output.
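For illustration only, the two hooks can also be driven by hand with a
kernel-log printer; drm_info_printer() is an existing DRM helper, while
the direct calls below stand in for the devcoredump core that normally
invokes them through the ip_funcs table:

	/* Illustrative sketch, not part of this patch. */
	struct drm_printer p = drm_info_printer(adev->dev);

	gfx_v12_ip_dump(adev);		/* latch the registers */
	gfx_v12_ip_print(adev, &p);	/* emit them through the printer */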
Reviewed-by: Alex Deucher
Signed-off-by: Sunil Khatri
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index d5a6788e7731..fcb63fbe8dcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -4756,6 +4756,21 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
 }
 
+static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	uint32_t i;
+	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
+
+	if (!adev->gfx.ip_dump_core)
+		return;
+
+	for (i = 0; i < reg_count; i++)
+		drm_printf(p, "%-50s \t 0x%08x\n",
+			   gc_reg_list_12_0[i].reg_name,
+			   adev->gfx.ip_dump_core[i]);
+}
+
 static void gfx_v12_ip_dump(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4787,6 +4802,7 @@ static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
 	.set_powergating_state = gfx_v12_0_set_powergating_state,
 	.get_clockgating_state = gfx_v12_0_get_clockgating_state,
 	.dump_ip_state = gfx_v12_ip_dump,
+	.print_ip_state = gfx_v12_ip_print,
 };
 
 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
--
cgit v1.2.3


From 33f23fc3155b13c4a96d94a0a22dc26db767440b Mon Sep 17 00:00:00 2001
From: Yifan Zha
Date: Thu, 27 Jun 2024 15:06:23 +0800
Subject: drm/amdgpu: Set no_hw_access when VF request full GPU fails

[Why]
If the VF requests full GPU access and the request fails, the VF
driver can get stuck accessing registers for an extended period
during the unload of KMS.

[How]
Set the no_hw_access flag when the VF request for full GPU access
fails. This prevents further hardware access attempts, avoiding the
prolonged stuck state.

Signed-off-by: Yifan Zha
Acked-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ccb3d041c2b2..111c380f929b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -86,8 +86,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
 
 	if (virt->ops && virt->ops->req_full_gpu) {
 		r = virt->ops->req_full_gpu(adev, init);
-		if (r)
+		if (r) {
+			adev->no_hw_access = true;
 			return r;
+		}
 
 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
 	}
--
cgit v1.2.3


From 906219ec94d0a51e4929d6860789656bab02cfa5 Mon Sep 17 00:00:00 2001
From: Sunil Khatri
Date: Wed, 3 Jul 2024 23:00:46 +0530
Subject: drm/amdgpu: enable IH ring1 for IH v7.0

We need IH ring1 for handling the pagefault interrupts which overflow
the default ring in specific use cases. Enabling ring1 allows software
to redirect such high-volume interrupts from the default IH ring to
ring1.
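As a sketch of how the extra ring is drained once enabled (the wrapper
function below is hypothetical; amdgpu_ih_process() is the existing
generic IH helper), the common walker is simply pointed at ring1:

	static void example_drain_ih1(struct amdgpu_device *adev)
	{
		/* ring_size stays zero on APUs, where ring1 is disabled */
		if (adev->irq.ih1.ring_size)
			amdgpu_ih_process(adev, &adev->irq.ih1);
	}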
Signed-off-by: Sunil Khatri
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index aa6235dd4f2b..548b3c63a765 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -546,8 +546,15 @@ static int ih_v7_0_sw_init(void *handle)
 	adev->irq.ih.use_doorbell = true;
 	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
 
-	adev->irq.ih1.ring_size = 0;
-	adev->irq.ih2.ring_size = 0;
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
+					use_bus_addr);
+		if (r)
+			return r;
+
+		adev->irq.ih1.use_doorbell = true;
+		adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+	}
 
 	/* initialize ih control register offset */
 	ih_v7_0_init_register_offset(adev);
--
cgit v1.2.3


From f0c6b79bfc9216bf0e4e35389d4e3e4fc7bbc2ad Mon Sep 17 00:00:00 2001
From: Sunil Khatri
Date: Wed, 3 Jul 2024 23:04:11 +0530
Subject: drm/amdgpu: enable redirection of IRQs for IH v7.0

Enable redirection of pagefault IRQs from specific clients to avoid
overflow without dropping interrupts. The interrupts are redirected to
another IH ring, i.e. ring1, where only these interrupts are
processed.

Signed-off-by: Sunil Khatri
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 548b3c63a765..6852081fcff2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -346,6 +346,21 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
 			    DELAY, 3);
 	WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
 
+	/* Redirect the interrupts to IH RB1 for dGPU */
+	if (adev->irq.ih1.ring_size) {
+		tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
+		WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+
+		tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
+		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
+		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
+				    SOURCE_ID_MATCH_ENABLE, 0x1);
+
+		WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+	}
+
 	pci_set_master(adev->pdev);
 
 	/* enable interrupts */
--
cgit v1.2.3


From 495e6173a4176ef578e8ada9f17e1685fe45d00d Mon Sep 17 00:00:00 2001
From: Sunil Khatri
Date: Thu, 27 Jun 2024 18:39:55 +0530
Subject: drm/amdgpu: add cp queue registers for gfx12 ipdump

Add gfx12 support for the CP queue registers of all queues, to be
used by devcoredump.
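The snapshot buffer is laid out flat, one register block per queue. As
an illustrative closed form of the running index used in the loops
below (the standalone variable names are hypothetical):

	/* registers of queue (mec, pipe, queue) start at: */
	index = ((mec * adev->gfx.mec.num_pipe_per_mec + pipe) *
		 adev->gfx.mec.num_queue_per_pipe + queue) *
		ARRAY_SIZE(gc_cp_reg_list_12);
	/* register r of that queue is ip_dump_compute_queues[index + r] */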
Reviewed-by: Alex Deucher
Signed-off-by: Sunil Khatri
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 111 ++++++++++++++++++++++++++++++++-
 1 file changed, 109 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index fcb63fbe8dcc..ce9c89ad45b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -130,6 +130,49 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
 };
 
+static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
+	/* compute registers */
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -1200,6 +1243,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
 {
 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
 	uint32_t *ptr;
+	uint32_t inst;
 
 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
 	if (ptr == NULL) {
@@ -1208,6 +1252,19 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
 	} else {
 		adev->gfx.ip_dump_core = ptr;
 	}
+
+	/* Allocate memory for compute queue registers for all the instances */
+	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
+	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+		adev->gfx.mec.num_queue_per_pipe;
+
+	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
+	if (ptr == NULL) {
+		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
+		adev->gfx.ip_dump_compute_queues = NULL;
+	} else {
+		adev->gfx.ip_dump_compute_queues = ptr;
+	}
 }
 
 static int gfx_v12_0_sw_init(void *handle)
@@ -1404,6 +1461,7 @@ static int gfx_v12_0_sw_fini(void *handle)
 	gfx_v12_0_free_microcode(adev);
 
 	kfree(adev->gfx.ip_dump_core);
+	kfree(adev->gfx.ip_dump_compute_queues);
 
 	return 0;
 }
@@ -4759,7 +4817,7 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
 static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	uint32_t i;
+	uint32_t i, j, k, reg, index = 0;
 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
 
 	if (!adev->gfx.ip_dump_core)
@@ -4769,12 +4827,36 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
 		drm_printf(p, "%-50s \t 0x%08x\n",
 			   gc_reg_list_12_0[i].reg_name,
 			   adev->gfx.ip_dump_core[i]);
+
+	/* print compute queue registers for all instances */
+	if (!adev->gfx.ip_dump_compute_queues)
+		return;
+
+	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
+	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
+		   adev->gfx.mec.num_mec,
+		   adev->gfx.mec.num_pipe_per_mec,
+		   adev->gfx.mec.num_queue_per_pipe);
+
+	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
+				for (reg = 0; reg < reg_count; reg++) {
+					drm_printf(p, "%-50s \t 0x%08x\n",
+						   gc_cp_reg_list_12[reg].reg_name,
+						   adev->gfx.ip_dump_compute_queues[index + reg]);
+				}
+				index += reg_count;
+			}
+		}
+	}
 }
 
 static void gfx_v12_ip_dump(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	uint32_t i;
+	uint32_t i, j, k, reg, index = 0;
 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
 
 	if (!adev->gfx.ip_dump_core)
@@ -4784,6 +4866,31 @@ static void gfx_v12_ip_dump(void *handle)
 	for (i = 0; i < reg_count; i++)
 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i]));
 	amdgpu_gfx_off_ctrl(adev, true);
+
+	/* dump compute queue registers for all instances */
+	if (!adev->gfx.ip_dump_compute_queues)
+		return;
+
+	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
+	amdgpu_gfx_off_ctrl(adev, false);
+	mutex_lock(&adev->srbm_mutex);
+	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+				/* ME0 is for GFX so start from 1 for CP */
+				soc24_grbm_select(adev, 1+i, j, k, 0);
+				for (reg = 0; reg < reg_count; reg++) {
+					adev->gfx.ip_dump_compute_queues[index + reg] =
+						RREG32(SOC15_REG_ENTRY_OFFSET(
+							gc_cp_reg_list_12[reg]));
+				}
+				index += reg_count;
+			}
+		}
+	}
+	soc24_grbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
--
cgit v1.2.3


From c8714ac982e54e758eb937c0d82d4d265dd6e7f0 Mon Sep 17 00:00:00 2001
From: Sunil Khatri
Date: Thu, 27 Jun 2024 18:13:11 +0530
Subject: drm/amdgpu: add gfx queue support for gfx12 ipdump

Add support for all the CP GFX queues in the gfx12 ipdump, to be
used by devcoredump.
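Every banked read below is bracketed the same way as in the compute
queue dump; a condensed sketch of that pattern (existing helpers,
simplified, with hypothetical me/pipe/queue/r variables):

	mutex_lock(&adev->srbm_mutex);
	soc24_grbm_select(adev, me, pipe, queue, 0);	/* select the bank */
	val = RREG32(SOC15_REG_ENTRY_OFFSET(gc_gfx_queue_reg_list_12[r]));
	soc24_grbm_select(adev, 0, 0, 0, 0);		/* restore defaults */
	mutex_unlock(&adev->srbm_mutex);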
Reviewed-by: Alex Deucher
Signed-off-by: Sunil Khatri
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 94 ++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index ce9c89ad45b1..084b039eb765 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -173,6 +173,35 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
 };
 
+static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
+	/* gfx queue registers */
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
+	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -1265,6 +1294,19 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
 	} else {
 		adev->gfx.ip_dump_compute_queues = ptr;
 	}
+
+	/* Allocate memory for gfx queue registers for all the instances */
+	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
+	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
+		adev->gfx.me.num_queue_per_pipe;
+
+	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
+	if (ptr == NULL) {
+		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
+		adev->gfx.ip_dump_gfx_queues = NULL;
+	} else {
+		adev->gfx.ip_dump_gfx_queues = ptr;
+	}
 }
 
 static int gfx_v12_0_sw_init(void *handle)
@@ -1462,6 +1504,7 @@ static int gfx_v12_0_sw_fini(void *handle)
 
 	kfree(adev->gfx.ip_dump_core);
 	kfree(adev->gfx.ip_dump_compute_queues);
+	kfree(adev->gfx.ip_dump_gfx_queues);
 
 	return 0;
 }
@@ -4851,6 +4894,31 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
 			}
 		}
 	}
+
+	/* print gfx queue registers for all instances */
+	if (!adev->gfx.ip_dump_gfx_queues)
+		return;
+
+	index = 0;
+	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
+	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
+		   adev->gfx.me.num_me,
+		   adev->gfx.me.num_pipe_per_me,
+		   adev->gfx.me.num_queue_per_pipe);
+
+	for (i = 0; i < adev->gfx.me.num_me; i++) {
+		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
+				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
+				for (reg = 0; reg < reg_count; reg++) {
+					drm_printf(p, "%-50s \t 0x%08x\n",
+						   gc_gfx_queue_reg_list_12[reg].reg_name,
+						   adev->gfx.ip_dump_gfx_queues[index + reg]);
+				}
+				index += reg_count;
+			}
+		}
+	}
 }
 
 static void gfx_v12_ip_dump(void *handle)
@@ -4891,6 +4959,32 @@ static void gfx_v12_ip_dump(void *handle)
 	soc24_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 	amdgpu_gfx_off_ctrl(adev, true);
+
+	/* dump gfx queue registers for all instances */
+	if (!adev->gfx.ip_dump_gfx_queues)
+		return;
+
+	index = 0;
+	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
+	amdgpu_gfx_off_ctrl(adev, false);
+	mutex_lock(&adev->srbm_mutex);
+	for (i = 0; i < adev->gfx.me.num_me; i++) {
+		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
+				soc24_grbm_select(adev, i, j, k, 0);
+
+				for (reg = 0; reg < reg_count; reg++) {
+					adev->gfx.ip_dump_gfx_queues[index + reg] =
+						RREG32(SOC15_REG_ENTRY_OFFSET(
+							gc_gfx_queue_reg_list_12[reg]));
+				}
+				index += reg_count;
+			}
+		}
+	}
+	soc24_grbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
--
cgit v1.2.3


From 54837bd2be229cc386298c5eab7d05f36f26e1fc Mon Sep 17 00:00:00 2001
From: Frank Min
Date: Thu, 30 May 2024 15:01:59 +0800
Subject: drm/amdgpu: restore dcc bo tiling configs while moving
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While moving a buffer that has a DCC tiling config, its original DCC
tiling needs to be restored:

1. extend the copy flags to cover the tiling bits
2. add logic to restore the original DCC tiling config
Signed-off-by: Frank Min
Reviewed-by: Alex Deucher
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 ++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 11 +++++++++++
 drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c  | 10 ++++++++--
 3 files changed, 33 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 58906bf7448e..b8bc7fa8c375 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -308,7 +308,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 
 	mutex_lock(&adev->mman.gtt_window_lock);
 	while (src_mm.remaining) {
-		uint64_t from, to, cur_size;
+		uint64_t from, to, cur_size, tiling_flags;
+		uint32_t num_type, data_format, max_com;
 		struct dma_fence *next;
 
 		/* Never copy more than 256MiB at once to avoid a timeout */
@@ -329,10 +330,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		abo_dst = ttm_to_amdgpu_bo(dst->bo);
 		if (tmz)
 			copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
-		if (abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+		if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
+		    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
 			copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
-		if (abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+		if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
+		    (dst->mem->mem_type == TTM_PL_VRAM)) {
 			copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
+			amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
+			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
+			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
+			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
+				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
+				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+		}
 
 		r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
 				       &next, false, true, copy_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index f2eb1cf364c5..138d80017f35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -112,6 +112,17 @@ struct amdgpu_copy_mem {
 #define AMDGPU_COPY_FLAGS_TMZ			(1 << 0)
 #define AMDGPU_COPY_FLAGS_READ_DECOMPRESSED	(1 << 1)
 #define AMDGPU_COPY_FLAGS_WRITE_COMPRESSED	(1 << 2)
+#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_SHIFT	3
+#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_MASK	0x03
+#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_SHIFT	5
+#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK	0x07
+#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT	8
+#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK	0x3f
+
+#define AMDGPU_COPY_FLAGS_SET(field, value) \
+	(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
+#define AMDGPU_COPY_FLAGS_GET(value, field) \
+	(((__u32)(value) >> AMDGPU_COPY_FLAGS_##field##_SHIFT) & AMDGPU_COPY_FLAGS_##field##_MASK)
 
 int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
 void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 96514fd77e35..41b5e45697dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1566,6 +1566,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint32_t byte_count,
 				       uint32_t copy_flags)
 {
+	uint32_t num_type, data_format, max_com;
+
+	max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
+	data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
+	num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
+
 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
 		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) |
@@ -1580,10 +1586,10 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 
 	if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
-		ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(4) | SDMA_DCC_NUM_TYPE(4) |
+		ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
 			((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
 			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
-			SDMA_DCC_MAX_COM(1) | SDMA_DCC_MAX_UCOM(1);
+			SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
 }
 
 /**
--
cgit v1.2.3


From 332210c13ac0595c34516caf9a61430b45e16d21 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Thu, 4 Jul 2024 13:48:19 +0800
Subject: drm/amdgpu: remove redundant semicolons in RAS_EVENT_LOG

Remove redundant semicolons in RAS_EVENT_LOG to avoid a code format
check warning.

Fixes: b712d7c20133 ("drm/amdgpu: fix compiler 'side-effect' check issue for RAS_EVENT_LOG()")
Signed-off-by: Yang Wang
Reviewed-by: Tao Zhou
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 0fa1148e6642..18d994c98a25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -69,7 +69,7 @@ struct amdgpu_iv_entry;
 #define AMDGPU_RAS_GET_FEATURES(val)	((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
 
 #define RAS_EVENT_LOG(adev, id, fmt, ...)	\
-	amdgpu_ras_event_log_print((adev), (id), (fmt), ##__VA_ARGS__);
+	amdgpu_ras_event_log_print((adev), (id), (fmt), ##__VA_ARGS__)
 
 enum amdgpu_ras_block {
 	AMDGPU_RAS_BLOCK__UMC = 0,
--
cgit v1.2.3


From b6ad1091666732e8750357f9d2fb599f81bf6bff Mon Sep 17 00:00:00 2001
From: Saleemkhan Jamadar
Date: Thu, 4 Jul 2024 15:19:35 +0530
Subject: drm/amdgpu: enable dpg for vcn and jpeg on GC 11_5_2

DPG mode is enabled for vcn and jpeg on VCN v4_0_5.

Signed-off-by: Saleemkhan Jamadar
Reviewed-by: Tim Huang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/soc21.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 6cc86d13f32a..d30ad7d56def 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -774,7 +774,9 @@ static int soc21_common_early_init(void *handle)
 			AMD_CG_SUPPORT_IH_CG |
 			AMD_CG_SUPPORT_BIF_MGCG |
 			AMD_CG_SUPPORT_BIF_LS;
-		adev->pg_flags = AMD_PG_SUPPORT_VCN |
+		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG |
+			AMD_PG_SUPPORT_VCN |
+			AMD_PG_SUPPORT_JPEG_DPG |
 			AMD_PG_SUPPORT_JPEG |
 			AMD_PG_SUPPORT_GFX_PG;
 		adev->external_rev_id = adev->rev_id + 0x40;
--
cgit v1.2.3


From 320debca1ba3a81c87247eac84eff976ead09ee0 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 19 Jan 2024 14:57:29 +0100
Subject: drm/amdgpu: reject gang submit on reserved VMIDs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A gang submit won't work if the VMID is reserved and we can't flush
out VM changes from multiple engines at the same time.

Signed-off-by: Christian König
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 15 +++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 15 ++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |  1 +
 3 files changed, 30 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ec888fc6ead8..916b6b8cf7d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1093,6 +1093,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	unsigned int i;
 	int r;
 
+	/*
+	 * We can't use gang submit with reserved VMIDs when the VM changes
+	 * can't be invalidated by more than one engine at the same time.
+	 */
+	if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+		for (i = 0; i < p->gang_size; ++i) {
+			struct drm_sched_entity *entity = p->entities[i];
+			struct drm_gpu_scheduler *sched = entity->rq->sched;
+			struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+			if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+				return -EINVAL;
+		}
+	}
+
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b5b9d4f40f53..b6a8bddada4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r || !idle)
 		goto error;
 
-	if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
 		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
@@ -474,6 +474,19 @@ error:
 	return r;
 }
 
+/*
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+	return vm->reserved_vmid[vmhub] ||
+	       (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index fa8c42c83d5d..240fa6751260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 			       struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
 
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
--
cgit v1.2.3


From e33697141bac18906345ea46533a240f1ad3cd21 Mon Sep 17 00:00:00 2001
From: Wayne Lin
Date: Thu, 23 May 2024 12:18:07 +0800
Subject: drm/amd/display: Solve mst monitors blank out problem after resume

[Why]
In dm resume, we first restore the dc state and do the mst resume for
topology probing thereafter. If we change the dpcd DP_MSTM_CTRL value
after LT in mst resume, it will cause a light-up problem on the hub.

[How]
Revert commit 202dc359adda ("drm/amd/display: Defer handling mst up
request in resume"). And adjust the reason to trigger dc_link_detect
to DETECT_REASON_RESUMEFROMS3S4.
Cc: stable@vger.kernel.org
Fixes: 202dc359adda ("drm/amd/display: Defer handling mst up request in resume")
Signed-off-by: Wayne Lin
Reviewed-by: Fangzhi Zuo
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 98cf523a629e..29af22ddccc9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2583,6 +2583,7 @@ static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
 				 DP_MST_EN |
+				 DP_UP_REQ_EN |
 				 DP_UPSTREAM_IS_SRC);
 	if (ret < 0) {
 		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
@@ -3186,7 +3187,7 @@ static int dm_resume(void *handle)
 		} else {
 			mutex_lock(&dm->dc_lock);
 			dc_exit_ips_for_hw_access(dm->dc);
-			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+			dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
 			mutex_unlock(&dm->dc_lock);
 		}
--
cgit v1.2.3


From 75ac6a250632d2fff62039ae728c842033dceddb Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Tue, 25 Jun 2024 14:23:42 +0800
Subject: drm/amdgpu: refine amdgpu ras event id core code

v1:
- use a unified event id to manage ras events
- add a new function amdgpu_ras_query_error_status_with_event() to
  accept the event type as a parameter

v2:
add a warn log to show the location of the function failure when
calling amdgpu_ras_mark_event(). (Tao Zhou)

v3:
change RAS_EVENT_TYPE_ISR to RAS_EVENT_TYPE_FATAL.

v4:
rename amdgpu_ras_get_recovery_event() to
amdgpu_ras_get_fatal_error_event().

Signed-off-by: Yang Wang
Reviewed-by: Tao Zhou
Reviewed-by: Hawking Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 102 ++++++++++++++++++++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h |  22 +++++--
 4 files changed, 104 insertions(+), 26 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 7945173321a2..19158cc30f31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -119,7 +119,7 @@ static struct aca_regs_dump {
 static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank,
 			      struct ras_query_context *qctx)
 {
-	u64 event_id = qctx ? qctx->event_id : 0ULL;
+	u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
 	int i;
 
 	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 9d3a3c778504..2542bd7aa7c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -274,7 +274,7 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
 static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx,
 					 struct mca_bank_entry *entry, struct ras_query_context *qctx)
 {
-	u64 event_id = qctx->event_id;
+	u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
 
 	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
 	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
@@ -543,7 +543,7 @@ static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
 
 	amdgpu_mca_bank_set_init(&mca_set);
 
-	qctx.event_id = 0ULL;
+	qctx.evid.event_id = RAS_EVENT_INVALID_ID;
 	ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, &qctx);
 	if (ret)
 		goto err_free_mca_set;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 53b5ac2d7bed..36917e4e3b19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1055,7 +1055,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
 	struct amdgpu_smuio_mcm_config_info *mcm_info;
 	struct ras_err_node *err_node;
 	struct ras_err_info *err_info;
-	u64 event_id = qctx->event_id;
+	u64 event_id = qctx->evid.event_id;
 
 	if (is_ue) {
 		for_each_ras_error(err_node, err_data) {
@@ -1140,7 +1140,7 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 {
 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
 	const char *blk_name = get_ras_block_str(&query_if->head);
-	u64 event_id = qctx->event_id;
+	u64 event_id = qctx->evid.event_id;
 
 	if (err_data->ce_count) {
 		if (err_data_has_source_info(err_data)) {
@@ -1366,7 +1366,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
 }
 
 /* query/inject/cure begin */
-int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
+static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
+						    struct ras_query_if *info,
+						    enum ras_event_type type)
 {
 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
 	struct ras_err_data err_data;
@@ -1385,8 +1387,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
 		return -EINVAL;
 
 	memset(&qctx, 0, sizeof(qctx));
-	qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
-			RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
+	qctx.evid.type = type;
+	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
 
 	if (!down_read_trylock(&adev->reset_domain->sem)) {
 		ret = -EIO;
@@ -1415,6 +1417,11 @@ out_fini_err_data:
 	return ret;
 }
 
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
+{
+	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+}
+
 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
 				 enum amdgpu_ras_block block)
 {
@@ -2305,7 +2312,7 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
 /* ih end */
 
 /* traversal all IPs except NBIO to query error counter */
-static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_manager *obj;
@@ -2338,7 +2345,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
 			    IP_VERSION(13, 0, 2)))
 			continue;
 
-		amdgpu_ras_query_error_status(adev, &info);
+		amdgpu_ras_query_error_status_with_event(adev, &info, type);
 
 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
 		    IP_VERSION(11, 0, 2) &&
@@ -2477,6 +2484,14 @@ bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
 	return false;
 }
 
+static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
+{
+	if (amdgpu_ras_intr_triggered())
+		return RAS_EVENT_TYPE_FATAL;
+	else
+		return RAS_EVENT_TYPE_INVALID;
+}
+
 static void amdgpu_ras_do_recovery(struct work_struct *work)
 {
 	struct amdgpu_ras *ras =
@@ -2485,6 +2500,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 	struct amdgpu_device *adev = ras->adev;
 	struct list_head device_list, *device_list_handle = NULL;
 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+	enum ras_event_type type;
 
 	if (hive) {
 		atomic_set(&hive->ras_recovery, 1);
@@ -2512,10 +2528,11 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 			device_list_handle = &device_list;
 		}
 
+		type = amdgpu_ras_get_fatal_error_event(adev);
 		list_for_each_entry(remote_adev,
 				device_list_handle, gmc.xgmi.head) {
 			amdgpu_ras_query_err_status(remote_adev);
-			amdgpu_ras_log_on_err_counter(remote_adev);
+			amdgpu_ras_log_on_err_counter(remote_adev, type);
 		}
 	}
 
@@ -3406,8 +3423,11 @@ static void ras_event_mgr_init(struct ras_event_manager *mgr)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++)
-		atomic64_set(&mgr->seqnos[i], 0);
+	memset(mgr, 0, sizeof(*mgr));
+	atomic64_set(&mgr->seqno, 0);
+
+	for (i = 0; i < ARRAY_SIZE(mgr->last_seqno); i++)
+		mgr->last_seqno[i] = RAS_EVENT_INVALID_ID;
 }
 
 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
@@ -3907,23 +3927,63 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
 	atomic_set(&ras->fed, !!status);
 }
 
-bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id)
+static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
 {
-	return !(id & BIT_ULL(63));
+	struct amdgpu_ras *ras;
+
+	ras = amdgpu_ras_get_context(adev);
+	if (!ras)
+		return NULL;
+
+	return ras->event_mgr;
+}
+
+int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
+				     const void *caller)
+{
+	struct ras_event_manager *event_mgr;
+	int ret = 0;
+
+	if (type >= RAS_EVENT_TYPE_COUNT) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	event_mgr = __get_ras_event_mgr(adev);
+	if (!event_mgr) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	event_mgr->last_seqno[type] = atomic64_inc_return(&event_mgr->seqno);
+
+out:
+	if (ret && caller)
+		dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
+			 (int)type, caller, ret);
+
+	return ret;
 }
 
 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
 {
-	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	struct ras_event_manager *event_mgr;
 	u64 id;
 
+	if (type >= RAS_EVENT_TYPE_COUNT)
+		return RAS_EVENT_INVALID_ID;
+
 	switch (type) {
-	case RAS_EVENT_TYPE_ISR:
-		id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]);
+	case RAS_EVENT_TYPE_FATAL:
+		event_mgr = __get_ras_event_mgr(adev);
+		if (!event_mgr)
+			return RAS_EVENT_INVALID_ID;
+
+		id = event_mgr->last_seqno[type];
 		break;
 	case RAS_EVENT_TYPE_INVALID:
 	default:
-		id = BIT_ULL(63) | 0ULL;
+		id = RAS_EVENT_INVALID_ID;
 		break;
 	}
 
@@ -3934,7 +3994,13 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 {
 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-		u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]);
+		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
+		u64 event_id;
+
+		if (amdgpu_ras_mark_ras_event(adev, type))
+			return;
+
+		event_id = amdgpu_ras_acquire_event_id(adev, type);
 
 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
 			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
@@ -4668,7 +4734,7 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	if (amdgpu_ras_event_id_is_valid(adev, event_id))
+	if (RAS_EVENT_ID_IS_VALID(event_id))
 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
 	else
 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 18d994c98a25..7c20def1c4f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -68,9 +68,15 @@ struct amdgpu_iv_entry;
 /* The high three bits indicates socketid */
 #define AMDGPU_RAS_GET_FEATURES(val)	((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
 
+#define RAS_EVENT_INVALID_ID		(BIT_ULL(63))
+#define RAS_EVENT_ID_IS_VALID(x)	(!((x) & BIT_ULL(63)))
+
 #define RAS_EVENT_LOG(adev, id, fmt, ...)	\
 	amdgpu_ras_event_log_print((adev), (id), (fmt), ##__VA_ARGS__)
 
+#define amdgpu_ras_mark_ras_event(adev, type) \
+	(amdgpu_ras_mark_ras_event_caller((adev), (type), __builtin_return_address(0)))
+
 enum amdgpu_ras_block {
 	AMDGPU_RAS_BLOCK__UMC = 0,
@@ -427,20 +433,25 @@ struct umc_ecc_info {
 };
 
 enum ras_event_type {
-	RAS_EVENT_TYPE_INVALID = -1,
-	RAS_EVENT_TYPE_ISR = 0,
+	RAS_EVENT_TYPE_INVALID = 0,
+	RAS_EVENT_TYPE_FATAL,
 	RAS_EVENT_TYPE_COUNT,
 };
 
 struct ras_event_manager {
-	atomic64_t seqnos[RAS_EVENT_TYPE_COUNT];
+	atomic64_t seqno;
+	u64 last_seqno[RAS_EVENT_TYPE_COUNT];
 };
 
-struct ras_query_context {
+struct ras_event_id {
 	enum ras_event_type type;
 	u64 event_id;
 };
 
+struct ras_query_context {
+	struct ras_event_id evid;
+};
+
 typedef int (*pasid_notify)(struct amdgpu_device *adev,
 		uint16_t pasid, void *data);
@@ -947,8 +958,9 @@ void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
 
-bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id);
 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
+int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
+				     const void *caller);
 
 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
--
cgit v1.2.3


From 5b9de2596f17fb328945676293a956f3d7f53a9d Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Thu, 27 Jun 2024 11:43:09 +0800
Subject: drm/amdgpu: add ras POISON_CREATION event id support

Add amdgpu ras POISON_CREATION event id support.

Signed-off-by: Yang Wang
Reviewed-by: Tao Zhou
Reviewed-by: Hawking Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 17 ++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h |  1 +
 2 files changed, 15 insertions(+), 3 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 36917e4e3b19..ff90b8e4bc29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2116,8 +2116,17 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
 				struct amdgpu_iv_entry *entry)
 {
-	dev_info(obj->adev->dev,
-		 "Poison is created\n");
+	struct amdgpu_device *adev = obj->adev;
+	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
+	u64 event_id;
+	int ret;
+
+	ret = amdgpu_ras_mark_ras_event(adev, type);
+	if (ret)
+		return;
+
+	event_id = amdgpu_ras_acquire_event_id(adev, type);
+	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
 
 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
@@ -2889,6 +2898,7 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
 	uint32_t new_detect_count, total_detect_count;
 	uint32_t need_query_count = poison_creation_count;
 	bool query_data_timeout = false;
+	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
 
 	memset(&info, 0, sizeof(info));
 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
@@ -2896,7 +2906,7 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
 	ecc_log = &ras->umc_ecc_log;
 	total_detect_count = 0;
 	do {
-		ret = amdgpu_ras_query_error_status(adev, &info);
+		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
 		if (ret)
 			return ret;
 
@@ -3975,6 +3985,7 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
 
 	switch (type) {
 	case RAS_EVENT_TYPE_FATAL:
+	case RAS_EVENT_TYPE_POISON_CREATION:
 		event_mgr = __get_ras_event_mgr(adev);
 		if (!event_mgr)
 			return RAS_EVENT_INVALID_ID;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 7c20def1c4f0..cc7a9be4fc1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -435,6 +435,7 @@ struct umc_ecc_info {
 enum ras_event_type {
 	RAS_EVENT_TYPE_INVALID = 0,
 	RAS_EVENT_TYPE_FATAL,
+	RAS_EVENT_TYPE_POISON_CREATION,
 	RAS_EVENT_TYPE_COUNT,
 };
--
cgit v1.2.3


From 91ba536ead3ad86952979b1944cf3b612a7f944a Mon Sep 17 00:00:00 2001
From: "Stanley.Yang"
Date: Wed, 3 Jul 2024 15:34:35 +0800
Subject: drm/amdkfd: Use mode1 reset for GFX v9.4.4

GFX v9.4.4 uses mode1 reset to handle poison consumption.

Signed-off-by: Stanley.Yang
Reviewed-by: Hawking Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 816800555f7f..da95b8ba87e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -164,7 +164,8 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
 	case SOC15_IH_CLIENTID_SE3SH:
 	case SOC15_IH_CLIENTID_UTCL2:
 		block = AMDGPU_RAS_BLOCK__GFX;
-		if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+		if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+		    amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
 		else
 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
@@ -180,7 +181,8 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
 	case SOC15_IH_CLIENTID_SDMA3:
 	case SOC15_IH_CLIENTID_SDMA4:
 		block = AMDGPU_RAS_BLOCK__SDMA;
-		if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+		if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+		    amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
 		else
 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
--
cgit v1.2.3


From 12b435a40cb5b05378ca244a9d524b125b0c1f6d Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Fri, 28 Jun 2024 16:24:39 +0800
Subject: drm/amdgpu: add ras POISON_CONSUMPTION event id support

Add amdgpu ras POISON_CONSUMPTION event id support.
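The series uses one consistent pattern for event ids; a minimal
illustrative sketch of it (real functions, simplified control flow):

	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
	u64 event_id;

	/* reserve a fresh sequence number for this event type... */
	if (!amdgpu_ras_mark_ras_event(adev, type)) {
		/* ...then fetch it so related log lines share one id */
		event_id = amdgpu_ras_acquire_event_id(adev, type);
		RAS_EVENT_LOG(adev, event_id, "Poison is consumed\n");
	}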
Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 16 +++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 15 ++++++++++++--- 3 files changed, 26 insertions(+), 6 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index ff90b8e4bc29..04278f13fd4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2076,10 +2076,17 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, obj->head.block, 0); struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION; + u64 event_id; + int ret; if (!block_obj || !con) return; + ret = amdgpu_ras_mark_ras_event(adev, type); + if (ret) + return; + /* both query_poison_status and handle_poison_consumption are optional, * but at least one of them should be implemented if we need poison * consumption handler @@ -2104,8 +2111,10 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * * For RMA case, amdgpu_umc_poison_handler will handle gpu reset. */ if (poison_stat && !con->is_rma) { - dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n", - block_obj->ras_comm.name); + event_id = amdgpu_ras_acquire_event_id(adev, type); + RAS_EVENT_LOG(adev, event_id, + "GPU reset for %s RAS poison consumption is issued!\n", + block_obj->ras_comm.name); amdgpu_ras_reset_gpu(adev); } @@ -2498,7 +2507,7 @@ static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device if (amdgpu_ras_intr_triggered()) return RAS_EVENT_TYPE_FATAL; else - return RAS_EVENT_TYPE_INVALID; + return RAS_EVENT_TYPE_POISON_CONSUMPTION; } static void amdgpu_ras_do_recovery(struct work_struct *work) @@ -3986,6 +3995,7 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type switch (type) { case RAS_EVENT_TYPE_FATAL: case RAS_EVENT_TYPE_POISON_CREATION: + case RAS_EVENT_TYPE_POISON_CONSUMPTION: event_mgr = __get_ras_event_mgr(adev); if (!event_mgr) return RAS_EVENT_INVALID_ID; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index cc7a9be4fc1a..925b4df3109a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -436,6 +436,7 @@ enum ras_event_type { RAS_EVENT_TYPE_INVALID = 0, RAS_EVENT_TYPE_FATAL, RAS_EVENT_TYPE_POISON_CREATION, + RAS_EVENT_TYPE_POISON_CONSUMPTION, RAS_EVENT_TYPE_COUNT, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index da95b8ba87e4..a9c3580be8c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -27,6 +27,7 @@ #include "soc15_int.h" #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" +#include "amdgpu_ras.h" /* * GFX9 SQ Interrupts @@ -144,9 +145,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { enum amdgpu_ras_block block = 0; - int old_poison; uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); + enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION; + u64 event_id; + int old_poison, ret; if (!p) return; @@ -193,10 
+196,16 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, return; } + ret = amdgpu_ras_mark_ras_event(dev->adev, type); + if (ret) + return; + kfd_signal_poison_consumed_event(dev, pasid); - dev_warn(dev->adev->dev, - "poison is consumed by client %d, kick off gpu reset flow\n", client_id); + event_id = amdgpu_ras_acquire_event_id(dev->adev, type); + + RAS_EVENT_LOG(dev->adev, event_id, + "poison is consumed by client %d, kick off gpu reset flow\n", client_id); amdgpu_amdkfd_ras_pasid_poison_consumption_handler(dev->adev, block, pasid, NULL, NULL, reset); -- cgit v1.2.3 From 1dd34092c1f1fa1e9e3b0a294f7ea0658e676ae0 Mon Sep 17 00:00:00 2001 From: Li Ma Date: Mon, 1 Jul 2024 15:56:12 +0800 Subject: drm/amd/swsmu: enable more Pstates profile levels for SMU v14.0.0 and v14.0.1 V1: This patch enables following UMD stable Pstates profile levels for power_dpm_force_performance_level interface. - profile_peak - profile_min_mclk - profile_min_sclk - profile_standard V2: Fix conflict with commit "drm/amd/pm: smu v14.0.4 reuse smu v14.0.0 dpmtable " V3: Add VCLK1 and DCLK1 support for SMU V14.0.1 And avoid to set VCLK1 and DCLK1 for SMU v14.0.0 Signed-off-by: Li Ma Reviewed-by: Tim Huang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 152 +++++++++++++++++++-- 1 file changed, 142 insertions(+), 10 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 3a9d58c036ea..5d47d58944f6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -65,6 +65,10 @@ #define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON +#define SMU_14_0_0_UMD_PSTATE_GFXCLK 700 +#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678 +#define SMU_14_0_0_UMD_PSTATE_FCLK 1800 + #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ @@ -725,7 +729,7 @@ static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu, { if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); - else + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); return 0; @@ -818,9 +822,11 @@ static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_MCLK: case SMU_UCLK: - case SMU_FCLK: max_dpm_level = 0; break; + case SMU_FCLK: + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; case SMU_SOCCLK: max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; break; @@ -855,7 +861,7 @@ static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, min_dpm_level = clk_table->NumMemPstatesEnabled - 1; break; case SMU_FCLK: - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + min_dpm_level = 0; break; case SMU_SOCCLK: min_dpm_level = 0; @@ -936,9 +942,11 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_MCLK: case SMU_UCLK: - case SMU_FCLK: max_dpm_level = 0; break; + case SMU_FCLK: + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; case SMU_SOCCLK: max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; break; @@ -969,7 +977,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, min_dpm_level = clk_table->NumMemPstatesEnabled - 1; break; case SMU_FCLK: - min_dpm_level = 
clk_table->NumFclkLevelsEnabled - 1; + min_dpm_level = 0; break; case SMU_SOCCLK: min_dpm_level = 0; @@ -1001,7 +1009,7 @@ static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, { if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max); - else + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); return 0; @@ -1020,9 +1028,15 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, case SMU_VCLK: member_type = METRICS_AVERAGE_VCLK; break; + case SMU_VCLK1: + member_type = METRICS_AVERAGE_VCLK1; + break; case SMU_DCLK: member_type = METRICS_AVERAGE_DCLK; break; + case SMU_DCLK1: + member_type = METRICS_AVERAGE_DCLK1; + break; case SMU_MCLK: member_type = METRICS_AVERAGE_UCLK; break; @@ -1106,7 +1120,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, { if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_level_count(smu, clk_type, count); - else + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); return 0; @@ -1250,6 +1264,8 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, case SMU_FCLK: case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) break; @@ -1268,13 +1284,67 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, return ret; } -static int smu_v14_0_0_set_performance_level(struct smu_context *smu, +static int smu_v14_0_common_get_dpm_profile_freq(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum smu_clk_type clk_type, + uint32_t *min_clk, + uint32_t *max_clk) +{ + uint32_t clk_limit = 0; + int ret = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); + break; + case SMU_SOCCLK: + clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); + break; + case SMU_FCLK: + clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); + break; + case SMU_VCLK: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); + break; + case SMU_VCLK1: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, NULL, &clk_limit); + break; + case SMU_DCLK: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); + break; + case SMU_DCLK1: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, NULL, &clk_limit); + break; + default: + ret = -EINVAL; + break; + } + *min_clk = *max_clk = clk_limit; + return ret; +} + +static int smu_v14_0_common_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { struct amdgpu_device *adev = smu->adev; uint32_t sclk_min = 0, sclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; uint32_t socclk_min = 0, socclk_max = 
0; + uint32_t vclk_min = 0, vclk_max = 0; + uint32_t dclk_min = 0, dclk_max = 0; + uint32_t vclk1_min = 0, vclk1_max = 0; + uint32_t dclk1_min = 0, dclk1_max = 0; int ret = 0; switch (level) { @@ -1282,28 +1352,54 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, NULL, &vclk1_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, NULL, &dclk1_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; + vclk_min = vclk_max; + dclk_min = dclk_max; + vclk1_min = vclk1_max; + dclk1_min = dclk1_max; break; case AMD_DPM_FORCED_LEVEL_LOW: smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, &vclk1_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, &dclk1_min, NULL); sclk_max = sclk_min; fclk_max = fclk_min; socclk_max = socclk_min; + vclk_max = vclk_min; + dclk_max = dclk_min; + vclk1_max = vclk1_min; + dclk1_max = dclk1_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, &vclk1_min, &vclk1_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, &dclk1_min, &dclk1_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - /* Temporarily do nothing since the optimal clocks haven't been provided yet */ + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK1, &vclk1_min, &vclk1_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK1, &dclk1_min, &dclk1_max); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1343,6 +1439,42 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, return ret; } + if (vclk_min && vclk_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_VCLK, + vclk_min, + vclk_max); + if (ret) + return ret; + } + + if (vclk1_min && vclk1_max) { + ret = 
smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_VCLK1, + vclk1_min, + vclk1_max); + if (ret) + return ret; + } + + if (dclk_min && dclk_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_DCLK, + dclk_min, + dclk_max); + if (ret) + return ret; + } + + if (dclk1_min && dclk1_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_DCLK1, + dclk1_min, + dclk1_max); + if (ret) + return ret; + } + return ret; } @@ -1520,7 +1652,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, - .set_performance_level = smu_v14_0_0_set_performance_level, + .set_performance_level = smu_v14_0_common_set_performance_level, .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, -- cgit v1.2.3 From 59f488be7631513acc9a266e9d006358545b7074 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 3 Jul 2024 10:23:13 +0800 Subject: drm/amdgpu: add ras event state device attribute support add amdgpu ras 'event_state' sysfs device attribute support Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 56 ++++++++++++++++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 8 ++++- 2 files changed, 59 insertions(+), 5 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 04278f13fd4b..1ba18655176d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1731,6 +1731,39 @@ static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, return sysfs_emit(buf, "schema: 0x%x\n", con->schema); } +static struct { + enum ras_event_type type; + const char *name; +} dump_event[] = { + {RAS_EVENT_TYPE_FATAL, "Fatal Error"}, + {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"}, + {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"}, +}; + +static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct amdgpu_ras *con = + container_of(attr, struct amdgpu_ras, event_state_attr); + struct ras_event_manager *event_mgr = con->event_mgr; + struct ras_event_state *event_state; + int i, size = 0; + + if (!event_mgr) + return -EINVAL; + + size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno)); + for (i = 0; i < ARRAY_SIZE(dump_event); i++) { + event_state = &event_mgr->event_state[dump_event[i].type]; + size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n", + dump_event[i].name, + atomic64_read(&event_state->count), + event_state->last_seqno); + } + + return (ssize_t)size; +} + static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1748,6 +1781,7 @@ static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev) &con->features_attr.attr, &con->version_attr.attr, &con->schema_attr.attr, + &con->event_state_attr.attr, NULL }; struct attribute_group group = { @@ -1980,6 +2014,8 @@ static DEVICE_ATTR(version, 0444, amdgpu_ras_sysfs_version_show, NULL); static DEVICE_ATTR(schema, 0444, amdgpu_ras_sysfs_schema_show, NULL); +static DEVICE_ATTR(event_state, 0444, + 
amdgpu_ras_sysfs_event_state_show, NULL); static int amdgpu_ras_fs_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1990,6 +2026,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) &con->features_attr.attr, &con->version_attr.attr, &con->schema_attr.attr, + &con->event_state_attr.attr, NULL }; struct bin_attribute *bin_attrs[] = { @@ -2012,6 +2049,10 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) con->schema_attr = dev_attr_schema; sysfs_attr_init(attrs[2]); + /* add event_state entry */ + con->event_state_attr = dev_attr_event_state; + sysfs_attr_init(attrs[3]); + if (amdgpu_bad_page_threshold != 0) { /* add bad_page_features entry */ bin_attr_gpu_vram_bad_pages.private = NULL; @@ -3440,13 +3481,17 @@ static int amdgpu_get_ras_schema(struct amdgpu_device *adev) static void ras_event_mgr_init(struct ras_event_manager *mgr) { + struct ras_event_state *event_state; int i; memset(mgr, 0, sizeof(*mgr)); atomic64_set(&mgr->seqno, 0); - for (i = 0; i < ARRAY_SIZE(mgr->last_seqno); i++) - mgr->last_seqno[i] = RAS_EVENT_INVALID_ID; + for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) { + event_state = &mgr->event_state[i]; + event_state->last_seqno = RAS_EVENT_INVALID_ID; + atomic64_set(&event_state->count, 0); + } } static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev) @@ -3961,6 +4006,7 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_ const void *caller) { struct ras_event_manager *event_mgr; + struct ras_event_state *event_state; int ret = 0; if (type >= RAS_EVENT_TYPE_COUNT) { @@ -3974,7 +4020,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_ goto out; } - event_mgr->last_seqno[type] = atomic64_inc_return(&event_mgr->seqno); + event_state = &event_mgr->event_state[type]; + event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno); + atomic64_inc(&event_state->count); out: if (ret && caller) @@ -4000,7 +4048,7 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type if (!event_mgr) return RAS_EVENT_INVALID_ID; - id = event_mgr->last_seqno[type]; + id = event_mgr->event_state[type].last_seqno; break; case RAS_EVENT_TYPE_INVALID: default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 925b4df3109a..dcf1f3dbb5c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -440,9 +440,14 @@ enum ras_event_type { RAS_EVENT_TYPE_COUNT, }; +struct ras_event_state { + u64 last_seqno; + atomic64_t count; +}; + struct ras_event_manager { atomic64_t seqno; - u64 last_seqno[RAS_EVENT_TYPE_COUNT]; + struct ras_event_state event_state[RAS_EVENT_TYPE_COUNT]; }; struct ras_event_id { @@ -496,6 +501,7 @@ struct amdgpu_ras { struct device_attribute features_attr; struct device_attribute version_attr; struct device_attribute schema_attr; + struct device_attribute event_state_attr; struct bin_attribute badpages_attr; struct dentry *de_ras_eeprom_table; /* block array */ -- cgit v1.2.3 From ee98fb71baa8728156b4d75740f82cb0cfe6d923 Mon Sep 17 00:00:00 2001 From: Zhigang Luo Date: Tue, 25 Jun 2024 13:53:56 -0400 Subject: drm/amdgpu: set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_MODE to 1 to avoid reading wrong WPTR from doorbell in sriov vf, set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_MODE to 1 to read WPTR from MQD. 
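The change that follows boils down to flipping one field in a packed doorbell control register. As a rough standalone illustration of the REG_SET_FIELD-style read-modify-write pattern (the mask and shift below are invented for the sketch, not the real CP_HQD_PQ_DOORBELL_CONTROL layout):

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: pretend DOORBELL_MODE is a 1-bit field at bit 16 */
#define DOORBELL_MODE_SHIFT 16
#define DOORBELL_MODE_MASK  (1u << DOORBELL_MODE_SHIFT)

/* generic read-modify-write of a register field */
static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
                              uint32_t shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t tmp = 0x00000005;      /* stand-in for the value read back */

        /* analogous to REG_SET_FIELD(tmp, ..., DOORBELL_MODE, 1) */
        tmp = reg_set_field(tmp, DOORBELL_MODE_MASK, DOORBELL_MODE_SHIFT, 1);
        printf("0x%08x\n", tmp);        /* prints 0x00010005 */
        return 0;
}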
Signed-off-by: Zhigang Luo Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 1149595a02d8..c908e585b9ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1613,6 +1613,9 @@ static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) DOORBELL_SOURCE, 0); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0); + if (amdgpu_sriov_vf(adev)) + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_MODE, 1); } else { tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 399fa2106631..66c73825c0a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -546,6 +546,9 @@ static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT | 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; + if (amdgpu_sriov_vf(mm->dev->adev)) + m->cp_hqd_pq_doorbell_control |= 1 << + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT; m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev); if (xcc == 0) { /* Set no_update_rptr = 0 in Master XCC */ -- cgit v1.2.3 From 5d64af40e3a99c3bbe7a66c3cfe23295f8e97130 Mon Sep 17 00:00:00 2001 From: Gavin Wan Date: Mon, 8 Jul 2024 17:07:04 +0000 Subject: drm/amd/amdgpu: fix SDMA IRQ client ID <-> req mapping. SDMA has 2 instances in SRIOV CPX mode. Odd numbered VFs have the sdma0/sdma1 instances. Even numbered VFs have sdma2/sdma3. For even numbered VFs, the sdma2 & sdma3 instances (irq source ids CLIENTID_SDMA2 and CLIENTID_SDMA3) should map to irq seq 0 & 1. Signed-off-by: Gavin Wan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 01644a869738..2c55bfd935bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -83,7 +83,7 @@ static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num) } } -static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id) +static int sdma_v4_4_2_irq_id_to_seq(struct amdgpu_device *adev, unsigned client_id) { switch (client_id) { case SOC15_IH_CLIENTID_SDMA0: @@ -91,9 +91,15 @@ static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id) case SOC15_IH_CLIENTID_SDMA1: return 1; case SOC15_IH_CLIENTID_SDMA2: - return 2; + if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1)) + return 0; + else + return 2; case SOC15_IH_CLIENTID_SDMA3: - return 3; + if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1)) + return 1; + else + return 3; default: return -EINVAL; } } @@ -1524,7 +1530,7 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev, uint32_t instance, i; DRM_DEBUG("IH: SDMA trap\n"); - instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id); /* Client id gives the SDMA instance in AID. 
To know the exact SDMA * instance, interrupt entry gives the node id which corresponds to the AID instance. @@ -1567,7 +1573,7 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) goto out; - instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id); if (instance < 0) goto out; @@ -1586,7 +1592,7 @@ static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev, DRM_ERROR("Illegal instruction in SDMA command stream\n"); - instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id); if (instance < 0) return 0; @@ -1620,7 +1626,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev, struct amdgpu_task_info *task_info; u64 addr; - instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id); if (instance < 0 || instance >= adev->sdma.num_instances) { dev_err(adev->dev, "sdma instance invalid %d\n", instance); return -EINVAL; -- cgit v1.2.3 From d02ddefc7eedaa6394279bad522c70fd5d63e163 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 3 Jul 2024 11:52:47 +0530 Subject: drm/amdgpu: Initialize VF partition mode For SOCs with GFX v9.4.3, a VF may have multiple compute partitions. Fetch the partition information during init and initialize partition nodes. There is no support to switch partition mode in VF mode, hence disable the same. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 12 +++++-- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 50 ++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 37 +++++++++++++++++----- 4 files changed, 88 insertions(+), 12 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 6b0416777c5b..ddda94e49db4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -297,6 +297,7 @@ struct amdgpu_gfx_funcs { int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); + int (*get_xccs_per_xcp)(struct amdgpu_device *adev); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 2b99eed5ba19..a6d456ec6aeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -219,7 +219,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) { int mode; - if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + if (!amdgpu_sriov_vf(xcp_mgr->adev) && + xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) return xcp_mgr->mode; if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) @@ -228,6 +229,12 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) if (!(flags & AMDGPU_XCP_FL_LOCKED)) mutex_lock(&xcp_mgr->xcp_lock); mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); + + /* First time query for VF, set the mode here */ + if (amdgpu_sriov_vf(xcp_mgr->adev) && + xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + xcp_mgr->mode = mode; + if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode) dev_WARN( xcp_mgr->adev->dev, @@ -282,8 +289,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int 
init_mode, { struct amdgpu_xcp_mgr *xcp_mgr; - if (!xcp_funcs || !xcp_funcs->switch_partition_mode || - !xcp_funcs->get_ip_details) + if (!xcp_funcs || !xcp_funcs->get_ip_details) return -EINVAL; xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 2c9a0aa41e2d..228fd4dd32f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -304,13 +304,56 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id) return ext_offset; } +static enum amdgpu_gfx_partition +__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc, num_xcc_per_xcp = 0, mode = 0; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + if (adev->gfx.funcs->get_xccs_per_xcp) + num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev); + if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0)) + mode = num_xcc / num_xcc_per_xcp; + + if (num_xcc_per_xcp == 1) + return AMDGPU_CPX_PARTITION_MODE; + + switch (mode) { + case 1: + return AMDGPU_SPX_PARTITION_MODE; + case 2: + return AMDGPU_DPX_PARTITION_MODE; + case 3: + return AMDGPU_TPX_PARTITION_MODE; + case 4: + return AMDGPU_QPX_PARTITION_MODE; + default: + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + } + + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; +} + static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) { - enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + enum amdgpu_gfx_partition derv_mode, + mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; struct amdgpu_device *adev = xcp_mgr->adev; - if (adev->nbio.funcs->get_compute_partition_mode) + derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr); + + if (amdgpu_sriov_vf(adev)) + return derv_mode; + + if (adev->nbio.funcs->get_compute_partition_mode) { mode = adev->nbio.funcs->get_compute_partition_mode(adev); + if (mode != derv_mode) + dev_warn( + adev->dev, + "Mismatch in compute partition mode - reported : %d derived : %d", + mode, derv_mode); + } return mode; } @@ -624,6 +667,9 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) { int ret; + if (amdgpu_sriov_vf(adev)) + aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL; + ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1, &aqua_vanjaram_xcp_funcs); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c908e585b9ec..20ea6cb01edf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -652,6 +652,15 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); } +static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev) +{ + u32 xcp_ctl; + + /* Value is expected to be the same on all, fetch from first instance */ + xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL); + + return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP); +} static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, int num_xccs_per_xcp) @@ -706,6 +715,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, + .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp, }; static int gfx_v9_4_3_aca_bank_parser(struct 
aca_handle *handle, @@ -2050,18 +2060,31 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r = 0, i, num_xcc; + int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + if (amdgpu_sriov_vf(adev)) { + enum amdgpu_gfx_partition mode; - if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, - AMDGPU_XCP_FL_NONE) == - AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, - amdgpu_user_partt_mode); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + AMDGPU_XCP_FL_NONE); + if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + return -EINVAL; + num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev); + adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp; + num_xcp = num_xcc / num_xcc_per_xcp; + r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode); + } else { + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + AMDGPU_XCP_FL_NONE) == + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + r = amdgpu_xcp_switch_partition_mode( + adev->xcp_mgr, amdgpu_user_partt_mode); + } if (r) return r; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { r = gfx_v9_4_3_xcc_cp_resume(adev, i); if (r) -- cgit v1.2.3 From 708f220567d4a398ee1553ee6519f7e73ae5ed6c Mon Sep 17 00:00:00 2001 From: Danijel Slivka Date: Fri, 5 Jul 2024 14:15:32 +0200 Subject: drm/amd/pm: Ignore initial value in smu response register Why: If the reg mmMP1_SMN_C2PMSG_90 is being written to during amdgpu driver load or unload, a subsequent amdgpu driver load will fail at smu_hw_init. The default value of the mmMP1_SMN_C2PMSG_90 register in a clean environment is 0x1; if the value differs from the expected one, amdgpu driver load will fail. How to fix: Ignore the initial value in the smu response register before the first smu message is sent; if the smc is in SMU_FW_INIT state, just proceed to send the message. If the register holds an unexpected value after an smu message was sent, set smc_fw_state to SMU_FW_HANG and send no further smu messages. v2: Set SMU_FW_INIT state at the start of smu hw_init/resume. Check smc_fw_state before sending an smu message; if in hang state, skip sending the message. 
Set SMU_FW_HANG only in case unexpected value is detected Signed-off-by: Danijel Slivka Reviewed-by: Kenneth Feng Reviewed-by: Lijo Lazar Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2 ++ drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 7 ++++++ drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 36 +++++++++++++++++++++++---- 3 files changed, 40 insertions(+), 5 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index d79bdb1e8cdf..fb8643d25d1b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1755,6 +1755,8 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + smu->smc_fw_state = SMU_FW_INIT; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) { if (smu->ppt_funcs->load_microcode) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index a34c802f52be..b44a185d07e8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -495,6 +495,12 @@ struct stb_context { spinlock_t lock; }; +enum smu_fw_status { + SMU_FW_INIT = 0, + SMU_FW_RUNTIME, + SMU_FW_HANG, +}; + #define WORKLOAD_POLICY_MAX 7 /* @@ -562,6 +568,7 @@ struct smu_context { uint32_t smc_fw_if_version; uint32_t smc_fw_version; uint32_t smc_fw_caps; + uint8_t smc_fw_state; bool uploading_custom_pp_table; bool dc_controlled_by_gpio; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 5592fd825aa3..88eefef05fae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -315,11 +315,21 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, if (adev->no_hw_access) return 0; - reg = __smu_cmn_poll_stat(smu); - res = __smu_cmn_reg2errno(smu, reg); - if (reg == SMU_RESP_NONE || - res == -EREMOTEIO) + if (smu->smc_fw_state == SMU_FW_HANG) { + dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n"); + res = -EREMOTEIO; goto Out; + } + + if (smu->smc_fw_state == SMU_FW_INIT) { + smu->smc_fw_state = SMU_FW_RUNTIME; + } else { + reg = __smu_cmn_poll_stat(smu); + res = __smu_cmn_reg2errno(smu, reg); + if (reg == SMU_RESP_NONE || res == -EREMOTEIO) + goto Out; + } + __smu_cmn_send_msg(smu, msg_index, param); res = 0; Out: @@ -350,6 +360,9 @@ int smu_cmn_wait_for_response(struct smu_context *smu) reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); + if (res == -EREMOTEIO) + smu->smc_fw_state = SMU_FW_HANG; + if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res && (res != -ETIME)) { amdgpu_device_halt(smu->adev); @@ -418,6 +431,16 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, goto Out; } + if (smu->smc_fw_state == SMU_FW_HANG) { + dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n"); + res = -EREMOTEIO; + goto Out; + } else if (smu->smc_fw_state == SMU_FW_INIT) { + /* Ignore initial smu response register value */ + poll = false; + smu->smc_fw_state = SMU_FW_RUNTIME; + } + if (poll) { reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); @@ -429,8 +452,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); res = 
__smu_cmn_reg2errno(smu, reg); - if (res != 0) + if (res != 0) { + if (res == -EREMOTEIO) + smu->smc_fw_state = SMU_FW_HANG; __smu_cmn_reg_print_error(smu, reg, index, param, msg); + } if (read_arg) { smu_cmn_read_arg(smu, read_arg); dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\ -- cgit v1.2.3 From 948f2828a676d323c18dfa16e6a91c8103a97e4d Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Tue, 9 Jul 2024 10:59:36 +0530 Subject: drm/amdgpu: select compute ME engines dynamically There is currently a single GFX ME, but this could change in future SoCs. Use the number of GFX MEs as the starting index for the compute MEs on GFX10. Signed-off-by: Sunil Khatri Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 4bc2abe97087..2957702fca0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -9332,7 +9332,7 @@ static void gfx_v10_ip_dump(void *handle) for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { /* ME0 is for GFX so start from 1 for CP */ - nv_grbm_select(adev, 1 + i, j, k, 0); + nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); for (reg = 0; reg < reg_count; reg++) { adev->gfx.ip_dump_compute_queues[index + reg] = -- cgit v1.2.3 From 7d570f56f1e1005cf5bb34ceec608432d2acb157 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 8 Jul 2024 15:02:40 -0400 Subject: drm/amdgpu/job: Replace DRM_INFO/ERROR logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the dev_info/err variants so we get per-device logging in multi-GPU cases. 
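The motivation is easiest to see with two devices side by side. A toy userspace model of the difference between a global log macro and a device-prefixed one (the device names are made up, and the kernel's dev_err() does considerably more than this):

#include <stdio.h>

struct device { const char *name; };

/* DRM_ERROR-style: no way to tell which GPU produced the line */
#define LOG_ERR(fmt, ...) \
        fprintf(stderr, "[drm] ERROR " fmt, ##__VA_ARGS__)
/* dev_err-style: every line carries the originating device */
#define DEV_ERR(dev, fmt, ...) \
        fprintf(stderr, "%s: " fmt, (dev)->name, ##__VA_ARGS__)

int main(void)
{
        struct device gpu0 = { "0000:03:00.0" };
        struct device gpu1 = { "0000:83:00.0" };

        LOG_ERR("ring %s timeout\n", "gfx");        /* ambiguous with two GPUs */
        DEV_ERR(&gpu0, "ring %s timeout\n", "gfx"); /* unambiguous */
        DEV_ERR(&gpu1, "ring %s timeout\n", "gfx");
        return 0;
}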
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index cf0c4470ab9c..e238f2832f65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -41,7 +41,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) int r; if (!drm_dev_enter(adev_to_drm(adev), &idx)) { - DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s", + dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s", __func__, s_job->sched->name); /* Effectively the job is aborted as the device is gone */ @@ -53,19 +53,20 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) if (amdgpu_gpu_recovery && amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { - DRM_ERROR("ring %s timeout, but soft recovered\n", - s_job->sched->name); + dev_err(adev->dev, "ring %s timeout, but soft recovered\n", + s_job->sched->name); goto exit; } - DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n", - job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), - ring->fence_drv.sync_seq); + dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n", + job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), + ring->fence_drv.sync_seq); ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid); if (ti) { - DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", - ti->process_name, ti->tgid, ti->task_name, ti->pid); + dev_err(adev->dev, + "Process information: process %s pid %d thread %s pid %d\n", + ti->process_name, ti->tgid, ti->task_name, ti->pid); amdgpu_vm_put_task_info(ti); } @@ -82,7 +83,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); if (r) - DRM_ERROR("GPU Recovery Failed: %d\n", r); + dev_err(adev->dev, "GPU Recovery Failed: %d\n", r); } else { drm_sched_suspend_timeout(&ring->sched); if (amdgpu_sriov_vf(adev)) @@ -274,7 +275,7 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, while (!fence && job->vm && !job->vmid) { r = amdgpu_vmid_grab(job->vm, ring, job, &fence); if (r) { - DRM_ERROR("Error getting VM ID (%d)\n", r); + dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); goto error; } } -- cgit v1.2.3 From a85cc86cce4183962c9ab80bf9c9c666aae174f8 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Tue, 9 Jul 2024 11:24:39 +0530 Subject: drm/amdgpu: select compute ME engines dynamically There is currently a single GFX ME, but this could change in future SoCs. Use the number of GFX MEs as the starting index for the compute MEs on GFX11. 
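The indexing change is the same one-liner in all three of these "select compute ME engines dynamically" patches; a minimal sketch of why it matters (the engine counts below are hypothetical):

#include <stdio.h>

int main(void)
{
        int num_gfx_me = 1;     /* one GFX ME on current parts */
        int num_mec = 2;        /* compute micro-engines, for illustration */

        /* compute ME selection starts right after the last GFX ME; with
         * num_gfx_me == 1 this matches the old hardcoded "1 + i" */
        for (int i = 0; i < num_mec; i++)
                printf("compute MEC%d -> grbm ME index %d\n", i, num_gfx_me + i);
        return 0;
}

If a future SoC shipped two GFX MEs, the hardcoded "1 + i" would dump the wrong engines, while "num_me + i" keeps the register dump pointed at the compute pipes.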
Signed-off-by: Sunil Khatri Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 4f57cf3dac48..dcef39907449 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -6456,7 +6456,7 @@ static void gfx_v11_ip_dump(void *handle) for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { /* ME0 is for GFX so start from 1 for CP */ - soc21_grbm_select(adev, 1+i, j, k, 0); + soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); for (reg = 0; reg < reg_count; reg++) { adev->gfx.ip_dump_compute_queues[index + reg] = RREG32(SOC15_REG_ENTRY_OFFSET( -- cgit v1.2.3 From 21e6f6085bbc979b5cc3f97857e66387ec550c48 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Wed, 3 Jul 2024 16:34:58 +0000 Subject: drm/amd/display: Allow display DCC for DCN401 To enable mesa to use display dcc, DM should expose them in the supported modifiers. Add the best (most efficient) modifiers first. Signed-off-by: Aurabindo Pillai Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 31 +++++++++++++++++----- 1 file changed, 25 insertions(+), 6 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 0320200dae94..a83bd0331c3b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -689,13 +689,32 @@ static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12); + uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D); + uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D); + uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D); + uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D); + uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1); + uint8_t max_comp_block[] = {1, 0}; + uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0}; + uint8_t i = 0, j = 0; + uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR}; + + for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) + max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]); + + /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different + * max compressed blocks first and then move on to the next smaller sized layouts. + * Do not add the linear modifier here, and hence the condition of size-1 for the loop + */ + for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++) + for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) + amdgpu_dm_plane_add_modifier(mods, size, capacity, + ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]); + + /* Without DCC. 
Add all modifiers including linear at the end */ + for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++) + amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]); - /* Without DCC: */ - amdgpu_dm_plane_add_modifier(mods, size, capacity, ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D)); - amdgpu_dm_plane_add_modifier(mods, size, capacity, ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D)); - amdgpu_dm_plane_add_modifier(mods, size, capacity, ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D)); - amdgpu_dm_plane_add_modifier(mods, size, capacity, ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D)); - amdgpu_dm_plane_add_modifier(mods, size, capacity, DRM_FORMAT_MOD_LINEAR); } static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) -- cgit v1.2.3 From c39385710cfd9ef22f6a2405d01ebcd6019e8767 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Tue, 9 Jul 2024 11:28:22 +0530 Subject: drm/amdgpu: select compute ME engines dynamically There is currently a single GFX ME, but this could change in future SoCs. Use the number of GFX MEs as the starting index for the compute MEs on GFX12. Signed-off-by: Sunil Khatri Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 084b039eb765..f384be0d1800 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4946,7 +4946,7 @@ static void gfx_v12_ip_dump(void *handle) for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { /* ME0 is for GFX so start from 1 for CP */ - soc24_grbm_select(adev, 1+i, j, k, 0); + soc24_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); for (reg = 0; reg < reg_count; reg++) { adev->gfx.ip_dump_compute_queues[index + reg] = RREG32(SOC15_REG_ENTRY_OFFSET( -- cgit v1.2.3 From c04706914ddeb9098a509a5647c0b46c7e07cf11 Mon Sep 17 00:00:00 2001 From: YiPeng Chai Date: Tue, 2 Jul 2024 18:16:52 +0800 Subject: drm/amdgpu: flush all cached ras bad pages to eeprom Before unloading the gpu driver, flush all cached ras bad pages to eeprom. v2: Put the same code into a function and reuse the function. 
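The teardown path added here is a bounded drain loop: flush the delayed work, ask whether it re-armed itself because pages remain, and repeat up to a retry cap. A self-contained C sketch of that shape (the queue and batch size are invented for the example):

#include <stdbool.h>
#include <stdio.h>

static int pending_pages = 7;   /* pretend these are cached bad pages */

/* stand-in for flush_delayed_work(): process what is queued right now */
static void flush_retirement_work(void)
{
        int batch = pending_pages < 3 ? pending_pages : 3;

        pending_pages -= batch;
        printf("flushed %d page(s), %d left\n", batch, pending_pages);
}

/* stand-in for the reschedule helper: re-arm only if work remains */
static bool schedule_if_pending(void)
{
        return pending_pages > 0;
}

int main(void)
{
        int max_retries = 100;  /* mirrors MAX_FLUSH_RETIRE_DWORK_TIMES */
        bool again;

        do {
                flush_retirement_work();
                again = schedule_if_pending();
        } while (again && max_retries--);
        return 0;
}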
Signed-off-by: YiPeng Chai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 35 +++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1ba18655176d..64bee125f17a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -124,6 +124,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms +#define MAX_FLUSH_RETIRE_DWORK_TIMES 100 + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -2907,6 +2909,23 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) ecc_log->prev_de_queried_count = 0; } +static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, + uint32_t delayed_ms) +{ + int ret; + + mutex_lock(&con->umc_ecc_log.lock); + ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, + UMC_ECC_NEW_DETECTED_TAG); + mutex_unlock(&con->umc_ecc_log.lock); + + if (ret) + schedule_delayed_work(&con->page_retirement_dwork, + msecs_to_jiffies(delayed_ms)); + + return ret ? true : false; +} + static void amdgpu_ras_do_page_retirement(struct work_struct *work) { struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, @@ -2928,12 +2947,8 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work) if (err_cnt && con->is_rma) amdgpu_ras_reset_gpu(adev); - mutex_lock(&con->umc_ecc_log.lock); - if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree, - UMC_ECC_NEW_DETECTED_TAG)) - schedule_delayed_work(&con->page_retirement_dwork, - msecs_to_jiffies(AMDGPU_RAS_RETIRE_PAGE_INTERVAL)); - mutex_unlock(&con->umc_ecc_log.lock); + amdgpu_ras_schedule_retirement_dwork(con, + AMDGPU_RAS_RETIRE_PAGE_INTERVAL); } static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, @@ -3237,11 +3252,19 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data = con->eh_data; + int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES; + bool ret; /* recovery_init failed to init it, fini is useless */ if (!data) return 0; + /* Save all cached bad pages to eeprom */ + do { + flush_delayed_work(&con->page_retirement_dwork); + ret = amdgpu_ras_schedule_retirement_dwork(con, 0); + } while (ret && max_flush_timeout--); + if (con->page_retirement_thread) kthread_stop(con->page_retirement_thread); -- cgit v1.2.3 From e23300dfffa178b19abc1b1b94ed7de74b0e0930 Mon Sep 17 00:00:00 2001 From: YiPeng Chai Date: Tue, 2 Jul 2024 17:53:02 +0800 Subject: drm/amdgpu: timely save bad pages to eeprom after gpu ras reset is completed The problem case is as follows: 1. GPU A triggers a gpu ras reset, and GPU A drives GPU B to also perform a gpu ras reset. 2. After gpu B ras reset started, gpu B queried a DE data. Since the DE data was queried in the ras reset thread instead of the page retirement thread, bad page retirement work would not be triggered. Then even if all gpu resets are completed, the bad pages will be cached in RAM until GPU B's bad page retirement work is triggered again and then saved to eeprom. This patch can save the bad pages to eeprom in time after gpu ras reset is completed. v2: 1. Add the above description to code comments. 2. Reuse existing function. 
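Both eeprom patches enforce one rule: retirement work must not run while a reset is in flight, but it must always be re-armed so nothing stays stuck in RAM. A compact model of that decision (the helper names and delay are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define RETIRE_INTERVAL_MS 100

static bool in_reset;

/* stand-in for schedule_delayed_work() on the retirement worker */
static void rearm_retirement(int delay_ms)
{
        printf("retirement work re-armed in %d ms\n", delay_ms);
}

static void do_page_retirement(void)
{
        if (in_reset) {
                /* defer: retry once the reset should have finished */
                rearm_retirement(RETIRE_INTERVAL_MS * 3);
                return;
        }
        printf("retiring bad pages and writing eeprom\n");
}

int main(void)
{
        in_reset = true;
        do_page_retirement();   /* deferred */
        in_reset = false;
        do_page_retirement();   /* runs */
        return 0;
}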
Signed-off-by: YiPeng Chai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 64bee125f17a..d0307c55da50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2934,8 +2934,12 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work) struct ras_err_data err_data; unsigned long err_cnt; - if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) + /* If gpu reset is ongoing, delay retiring the bad pages */ + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { + amdgpu_ras_schedule_retirement_dwork(con, + AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3); return; + } amdgpu_ras_error_data_init(&err_data); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 0faa21d8a7b4..9dbb13adb661 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -29,6 +29,7 @@ #include "mp/mp_13_0_6_sh_mask.h" #define MAX_ECC_NUM_PER_RETIREMENT 32 +#define DELAYED_TIME_FOR_GPU_RESET 1000 //ms static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev, uint32_t node_inst, @@ -568,6 +569,23 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, con->umc_ecc_log.de_queried_count++; + /* The problem case is as follows: + * 1. GPU A triggers a gpu ras reset, and GPU A drives + * GPU B to also perform a gpu ras reset. + * 2. After gpu B ras reset started, gpu B queried a DE + * data. Since the DE data was queried in the ras reset + * thread instead of the page retirement thread, bad + * page retirement work would not be triggered. Then + * even if all gpu resets are completed, the bad pages + * will be cached in RAM until GPU B's bad page retirement + * work is triggered again and then saved to eeprom. + * Trigger delayed work to save the bad pages to eeprom in time + * after gpu ras reset is completed. + */ + if (amdgpu_ras_in_recovery(adev)) + schedule_delayed_work(&con->page_retirement_dwork, + msecs_to_jiffies(DELAYED_TIME_FOR_GPU_RESET)); + return 0; } -- cgit v1.2.3 From 8030f6533ef0a40a81ab6113aa6a0ab6a8e981c4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 29 Apr 2024 18:11:11 -0400 Subject: drm/amdgpu: remove exp hw support check for gfx12 Enable it by default. 
Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index f927ccd7ec45..b241f61fe9c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -2161,8 +2161,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): - if (!amdgpu_exp_hw_support) - return -EINVAL; amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block); break; default: -- cgit v1.2.3 From 7ed58b68ac5c3ff378656eb082c982a266bb658e Mon Sep 17 00:00:00 2001 From: Leo Li Date: Thu, 11 Jul 2024 10:31:09 -0400 Subject: Revert "drm/amd/display: Reset freesync config before update new state" This change caused PSR SU panels to not read from their remote fb, preventing us from entering self-refresh. It is a regression. This reverts commit eb6dfbb7a9c67c7d9bcdb9f9b9131270e2144e3d. Signed-off-by: Leo Li Acked-by: Alex Deucher Signed-off-by: Alex Deucher (cherry picked from commit dc1000bf463d1d89f66d6b5369cf76603f32c4d3) --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 29af22ddccc9..7e7929f24ae4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -10463,7 +10463,6 @@ skip_modeset: } /* Update Freesync settings. */ - reset_freesync_config_for_crtc(dm_new_crtc_state); get_freesync_config_for_crtc(dm_new_crtc_state, dm_new_conn_state); -- cgit v1.2.3 From 478cb8badf1898f22e19e4d21f7d7b70cd44df9e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 8 Jul 2024 17:39:15 -0400 Subject: drm/amdgpu/mes11: update opcode strings Add new packet. Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 1376b6ff1b77..8ce51b9236c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -120,6 +120,9 @@ static const char *mes_v11_0_opcodes[] = { "MISC", "UPDATE_ROOT_PAGE_TABLE", "AMD_LOG", + "unused", + "unused", + "SET_HW_RSRC_1", }; static const char *mes_v11_0_misc_opcodes[] = { -- cgit v1.2.3 From 1cff1010bef6f325d895db0306b59dc7232ed9b7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 8 Jul 2024 17:40:14 -0400 Subject: drm/amdgpu/mes12: add missing opcode string Fixes the indexing of the string array. Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 106eef1ff5cc..c9f74231ad59 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -99,6 +99,7 @@ static const char *mes_v12_0_opcodes[] = { "SET_LOG_BUFFER", "CHANGE_GANG_PRORITY", "QUERY_SCHEDULER_STATUS", + "unused", "SET_DEBUG_VMID", "MISC", "UPDATE_ROOT_PAGE_TABLE", -- cgit v1.2.3
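Both MES fixes above protect the same invariant: these opcode string tables are positional, so the array index is the opcode value, and a gap in the numbering must be padded with a placeholder or every later name shifts by one. A small standalone demonstration (the opcode values here are invented):

#include <stdio.h>

enum mes_opcode { OP_PING = 0, OP_RESERVED, OP_MISC, OP_COUNT };

/* the "unused" placeholder keeps later entries aligned with their opcode */
static const char *opcode_str[OP_COUNT] = { "PING", "unused", "MISC" };

static const char *opcode_name(unsigned int op)
{
        return op < OP_COUNT ? opcode_str[op] : "UNKNOWN";
}

int main(void)
{
        /* dropping the placeholder would leave OP_MISC (2) out of bounds
         * or pointing at the wrong name */
        printf("opcode %d -> %s\n", OP_MISC, opcode_name(OP_MISC));
        return 0;
}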