Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 471
1 file changed, 259 insertions(+), 212 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index c6363f5ad564..3c6eafb62ee6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle) adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); + harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING); if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) adev->vcn.harvest_config |= 1 << i; } @@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle) return r; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + volatile struct amdgpu_fw_shared *fw_shared; + if (adev->vcn.harvest_config & (1 << j)) continue; adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; @@ -175,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle) adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; - adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9); + adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9); adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; - adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0); adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; - adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1); adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; - adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD); adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; - adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP); + adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP); ring = &adev->vcn.inst[j].ring_dec; ring->use_doorbell = true; @@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (amdgpu_sriov_vf(adev) ? 2*j : 8*j); sprintf(ring->name, "vcn_dec_%d", j); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, + 0, AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? 
(1 + i + 2*j) : (2 + i + 8*j)); sprintf(ring->name, "vcn_enc_%d.%d", j, i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->vcn.inst[j].irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } + + fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG); } if (amdgpu_sriov_vf(adev)) { @@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle) */ static int vcn_v2_5_sw_fini(void *handle) { - int r; + int i, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 = 0; + } if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); @@ -308,25 +324,16 @@ done: static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; - int i, j; + int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - ring = &adev->vcn.inst[i].ring_dec; if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); - - ring->sched.ready = false; - - for (j = 0; j < adev->vcn.num_enc_rings; ++j) { - ring = &adev->vcn.inst[i].ring_enc[j]; - ring->sched.ready = false; - } } return 0; @@ -392,38 +399,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) continue; /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi)); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[i].gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr)); offset = size; - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size); /* cache window 1: stack */ - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + 
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); } } @@ -436,88 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } offset = 0; } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); offset = size; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), + VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); } if (!indirect) WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); else WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); /* cache window 1: stack */ if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, 
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); /* cache window 2: context */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); /* non-cache window */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); + VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); + VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } /** @@ -671,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev, 
UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__MMSCH_MODE_MASK); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); + VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); /* turn off clock gating */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); + VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); /* turn on SUVD clock gating */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); + VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); + VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } /** @@ -750,17 +769,18 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1, + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* enable dynamic power gating mode */ - tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS); + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS); tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; - WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp); + WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp); if (indirect) adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; @@ -773,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); + VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* disable master interupt */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); + VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect); /* setup mmUVD_LMI_CTRL */ tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | @@ -789,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 0x00100000L); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); + VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_MPC_CNTL), + VCN, 0, mmUVD_MPC_CNTL), 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_MPC_SET_MUXA0), + VCN, 0, mmUVD_MPC_SET_MUXA0), ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_MPC_SET_MUXB0), + VCN, 0, mmUVD_MPC_SET_MUXB0), ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | 
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_MPC_SET_MUX), + VCN, 0, mmUVD_MPC_SET_MUX), ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); @@ -818,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); + VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); + VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); /* enable LMI MC and UMC channels */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); + VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); /* unblock VCPU register access */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); + VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); + VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* enable master interrupt */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_MASTINT_EN), + VCN, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) @@ -853,30 +873,41 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; /* set the write pointer delay */ - WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); /* set the wb address */ - WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR, + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0); - WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0); + WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0); - ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR, + ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode 
&= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + return 0; } @@ -898,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) } /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* set uvd status busy */ - tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; - WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); + tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp); } if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) @@ -916,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; /* enable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); /* disable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* setup mmUVD_LMI_CTRL */ - tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL); + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL); tmp &= ~0xff; - WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8| + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8| UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); /* setup mmUVD_MPC_CNTL */ - tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL); + tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL); tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp); /* setup UVD_MPC_SET_MUXA0 */ - WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0, + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0, ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); /* setup UVD_MPC_SET_MUXB0 */ - WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0, + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0, ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); /* setup mmUVD_MPC_SET_MUX */ - WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX, + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX, ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); @@ -962,30 +993,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) vcn_v2_5_mc_resume(adev); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; if (adev->vcn.harvest_config & (1 << i)) continue; /* VCN global tiling registers */ - WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, + WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, + WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG, adev->gfx.config.gb_addr_config); /* enable LMI MC and UMC channels */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* unblock VCPU register access */ - 
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0, ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, ~UVD_VCPU_CNTL__BLK_RST_MASK); for (k = 0; k < 10; ++k) { uint32_t status; for (j = 0; j < 100; ++j) { - status = RREG32_SOC15(UVD, i, mmUVD_STATUS); + status = RREG32_SOC15(VCN, i, mmUVD_STATUS); if (status & 2) break; if (amdgpu_emu_mode == 1) @@ -998,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) break; DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__BLK_RST_MASK, ~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, ~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); @@ -1015,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) } /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* clear the busy bit of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0); + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0); ring = &adev->vcn.inst[i].ring_dec; /* force RBC into idle state */ @@ -1033,33 +1065,40 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0); - ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR, + ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); - ring = &adev->vcn.inst[i].ring_enc[0]; - WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; + ring = &adev->vcn.inst[i].ring_enc[0]; + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, 
upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[i].ring_enc[1]; - WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; } return 0; @@ -1079,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev, * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of * memory descriptor location */ - WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); - WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); /* 2, update vmid of descriptor */ - data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); + data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID); data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; /* use domain0 for MM scheduler */ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); - WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data); /* 3, notify mmsch about the size of this descriptor */ - WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size); /* 4, set resp to zero */ - WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0); /* * 5, kick off the initialization and wait until * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ - WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); - data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP); loop = 10; while ((data & 0x10000002) != 0x10000002) { udelay(100); - data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP); loop--; if (!loop) break; @@ -1128,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev) uint32_t table_size = 0; struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } }; struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } }; - struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } }; struct mmsch_v1_0_cmd_end end = { { 0 } }; uint32_t *init_table = adev->virt.mm_table.cpu_addr; struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; - direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING; end.cmd_header.command_type = MMSCH_COMMAND__END; header->version = MMSCH_VERSION; @@ -1150,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct 
amdgpu_device *adev) table_size = 0; MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); /* mc resume*/ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); offset = 0; MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0); + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0); } else { MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[i].gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[i].gpu_addr)); offset = size; MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0), size); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1), 0); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2), 0); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); ring = &adev->vcn.inst[i].ring_enc[0]; ring->wptr = 0; MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), + SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO), lower_32_bits(ring->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), + SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), + SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE), ring->ring_size / 4); ring = &adev->vcn.inst[i].ring_dec; ring->wptr = 0; MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), 
lower_32_bits(ring->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, + SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), upper_32_bits(ring->gpu_addr)); @@ -1248,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); MMSCH_V1_0_INSERT_DIRECT_WT( - SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp); /* add end packet */ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); @@ -1269,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) uint32_t tmp; /* Wait for power status to be 1 */ - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1, + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); /* wait for read ptr to be equal to write ptr */ - tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); - tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2); - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); - tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1, + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); /* disable dynamic power gating mode */ - WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); return 0; @@ -1330,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) return r; /* block VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), UVD_RB_ARB_CTRL__VCPU_DIS_MASK, ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); /* reset VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__BLK_RST_MASK, ~UVD_VCPU_CNTL__BLK_RST_MASK); /* disable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, ~(UVD_VCPU_CNTL__CLK_EN_MASK)); /* clear status */ @@ -1349,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) vcn_v2_5_enable_clock_gating(adev); /* enable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } @@ -1365,55 +1402,69 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, { struct amdgpu_ring *ring; uint32_t reg_data = 0; - int ret_code; + int ret_code = 0; /* pause/unpause if state is changed */ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { DRM_DEBUG("dpg pause state changed 
%d -> %d", adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); - reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) & + reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) & (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { - ret_code = 0; - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1, + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); if (!ret_code) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; + /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; - WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data); + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); /* wait for ACK */ - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE, + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + /* Restore */ + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[inst_idx].ring_enc[0]; - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - + ring->wptr = 0; + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[inst_idx].ring_enc[1]; - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - - WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR, - RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF); - - SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, + ring->wptr = 0; + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; + + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); } } 
else { - /* unpause dpg, no need to wait */ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; - WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data); + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); } adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; } @@ -1432,7 +1483,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR); } /** @@ -1449,7 +1500,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR); } /** @@ -1463,15 +1514,11 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) - WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2, - lower_32_bits(ring->wptr) | 0x80000000); - if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } } @@ -1517,9 +1564,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR); else - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2); } /** @@ -1537,12 +1584,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR); } else { if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2); } } @@ -1562,14 +1609,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); } } else { if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); } } } |
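Most of the churn in this diff is mechanical: every RREG32_SOC15/WREG32_SOC15/SOC15_REG_OFFSET call site switches its IP-block argument from UVD to VCN. A rough sketch of how those helpers resolve a register, simplified from soc15_common.h, shows why only the token changes; to my understanding UVD_HWIP is aliased to VCN_HWIP on SOC15 parts, so this reads as a consistency cleanup for the VCN 2.5 code rather than a change in the programmed offsets.

    /* Simplified view of the SOC15 register helpers (see soc15_common.h);
     * the ip argument selects the per-IP, per-instance offset table. */
    #define SOC15_REG_OFFSET(ip, inst, reg) \
            (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
    #define RREG32_SOC15(ip, inst, reg) \
            RREG32(SOC15_REG_OFFSET(ip, inst, reg))
    #define WREG32_SOC15(ip, inst, reg, value) \
            WREG32(SOC15_REG_OFFSET(ip, inst, reg), value)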
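The functional core of the patch is the firmware shared-memory handshake: mc_resume now programs the VCPU non-cache window (mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW/HIGH and NONCACHE_OFFSET0/SIZE0) to point at a per-instance buffer, sw_init advertises multi-queue support in that buffer, and sw_fini clears the flag before teardown. A minimal sketch of the pattern, using stand-in types that only mirror the fields this diff touches (the real struct amdgpu_fw_shared lives in amdgpu_vcn.h):

    /* Illustrative stand-ins; not the real driver definitions. */
    struct fw_shared_multi_queue {
            uint32_t decode_queue_mode;
            uint32_t encode_generalpurpose_queue_mode;
            uint32_t encode_lowlatency_queue_mode;
    };

    struct amdgpu_fw_shared {
            uint32_t present_flag_0;   /* features the driver exposes to the VCN firmware */
            struct fw_shared_multi_queue multi_queue;
    };

    /* sw_init: tell the firmware the driver speaks the multi-queue protocol. */
    fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
    fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

    /* sw_fini: clear the flag so a later firmware load never sees stale state. */
    fw_shared->present_flag_0 = 0;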
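Whenever a ring's base and read/write pointers are reprogrammed, the patch now brackets the writes with that queue's FW_QUEUE_RING_RESET mode bit in the shared buffer, and for the decode ring in DPG mode it also holds off DPG power-up until the pointers are consistent. Condensed from vcn_v2_5_start_dpg_mode above, the ordering is:

    /* 1. Stall DPG so power-up cannot race with the pointer reset. */
    WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
             UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
             ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

    /* 2. Tell the firmware the decode queue is being reset. */
    fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

    /* 3. Program RB base, RPTR and WPTR (register writes as in the hunk above). */

    /* 4. Clear the reset flag and release the stall. */
    fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
    WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
             0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

The encode rings in vcn_v2_5_start follow the same set-flag / program / clear-flag sequence with their respective *_queue_mode fields but without the stall, while vcn_v2_5_pause_dpg_mode wraps its encode-ring restore in the same stall/unstall pair.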