Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/umc_v6_1.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 112
1 file changed, 103 insertions, 9 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 14d346321a5f..418cf097c918 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -56,24 +56,43 @@ const uint32_t
 
 static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
 {
-	WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+	uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+	rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
 			RSMU_UMC_INDEX_MODE_EN, 1);
+
+	WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
 }
 
 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
 {
-	WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+	uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+	rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
 			RSMU_UMC_INDEX_MODE_EN, 0);
+
+	WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
 }
 
 static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
 {
-	uint32_t rsmu_umc_index;
+	uint32_t rsmu_umc_addr, rsmu_umc_val;
 
-	rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+	rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
 			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+	rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
 
-	return REG_GET_FIELD(rsmu_umc_index,
+	return REG_GET_FIELD(rsmu_umc_val,
 			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
 			RSMU_UMC_INDEX_MODE_EN);
 }
@@ -85,6 +104,81 @@ static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
 	return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
 }
 
+static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
+					uint32_t umc_reg_offset)
+{
+	uint32_t ecc_err_cnt_addr;
+	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* UMC 6_1_2 registers */
+		ecc_err_cnt_sel_addr =
+			SOC15_REG_OFFSET(UMC, 0,
+					mmUMCCH0_0_EccErrCntSel_ARCT);
+		ecc_err_cnt_addr =
+			SOC15_REG_OFFSET(UMC, 0,
+					mmUMCCH0_0_EccErrCnt_ARCT);
+	} else {
+		/* UMC 6_1_1 registers */
+		ecc_err_cnt_sel_addr =
+			SOC15_REG_OFFSET(UMC, 0,
+					mmUMCCH0_0_EccErrCntSel);
+		ecc_err_cnt_addr =
+			SOC15_REG_OFFSET(UMC, 0,
+					mmUMCCH0_0_EccErrCnt);
+	}
+
+	/* select the lower chip */
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+					umc_reg_offset) * 4);
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+					UMCCH0_0_EccErrCntSel,
+					EccErrCntCsSel, 0);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+			ecc_err_cnt_sel);
+
+	/* clear lower chip error count */
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+			UMC_V6_1_CE_CNT_INIT);
+
+	/* select the higher chip */
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+					umc_reg_offset) * 4);
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+					UMCCH0_0_EccErrCntSel,
+					EccErrCntCsSel, 1);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+			ecc_err_cnt_sel);
+
+	/* clear higher chip error count */
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+			UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
+{
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+	uint32_t rsmu_umc_index_state =
+				umc_v6_1_get_umc_index_mode_state(adev);
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_disable_umc_index_mode(adev);
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_6_reg_offset(adev,
+						umc_inst,
+						ch_inst);
+
+		umc_v6_1_clear_error_count_per_channel(adev,
+						umc_reg_offset);
+	}
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_enable_umc_index_mode(adev);
+}
+
 static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
 						   uint32_t umc_reg_offset,
 						   unsigned long *error_count)
@@ -117,23 +211,21 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrCntCsSel, 0);
 	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
 	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
 	*error_count +=
 		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
 		 UMC_V6_1_CE_CNT_INIT);
-	/* clear the lower chip err count */
-	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
 	/* select the higher chip and check the err counter */
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrCntCsSel, 1);
 	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
 	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
 	*error_count +=
 		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
 		 UMC_V6_1_CE_CNT_INIT);
-	/* clear the higher chip err count */
-	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
 	/* check for SRAM correctable error
 	  MCUMC_STATUS is a 64 bit register */
@@ -209,6 +301,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 
 	if (rsmu_umc_index_state)
 		umc_v6_1_enable_umc_index_mode(adev);
+
+	umc_v6_1_clear_error_count(adev);
 }
 
 static void umc_v6_1_query_error_address(struct amdgpu_device *adev,