author | Kevin Wang <kevin1.wang@amd.com> | 2019-04-19 10:31:18 +0800 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2019-06-21 18:59:29 -0500 |
commit | db439ca21b81f0c1f1b0f1f1b6f0efc8af3a0cf4 (patch) | |
tree | 849bfe31a0aea3760563e2e65c2e059df0959b58 /drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |
parent | 3366561767209326d3a6d4d2c727788beca7fcea (diff) | |
drm/amd/powerplay: add function force_clk_levels for navi10
Add a sysfs interface for force_clk_levels on navi10.
Signed-off-by: Kevin Wang <kevin1.wang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 16 |
1 file changed, 8 insertions, 8 deletions
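All 16 changed lines make the same substitution in the sysfs handlers of amdgpu_pm.c. For orientation, before the patch body, here is a hedged user-space sketch of how this forced-clock-level interface is typically exercised; the card0 device path and the "2 3" level selection are illustrative assumptions, not part of this patch.

```c
/*
 * Hedged sketch: drive the pp_dpm_sclk sysfs file that this patch routes
 * through smu_force_clk_levels() on software-SMU parts such as navi10.
 * The card0 path and the chosen level list are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	ssize_t n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *dev = "/sys/class/drm/card0/device";
	char path[256];

	/* Forcing clock levels requires the manual performance level. */
	snprintf(path, sizeof(path), "%s/power_dpm_force_performance_level", dev);
	if (write_str(path, "manual"))
		return 1;

	/* Restrict sclk to DPM levels 2 and 3 (example selection). */
	snprintf(path, sizeof(path), "%s/pp_dpm_sclk", dev);
	return write_str(path, "2 3") ? 1 : 0;
}
```

Writes to pp_dpm_sclk only take effect once power_dpm_force_performance_level has been set to manual, which is why the sketch sets it first.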
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6f33b9678457..d78d9cbd0ee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -901,7 +901,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
@@ -948,7 +948,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
@@ -988,7 +988,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
@@ -1028,7 +1028,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
@@ -1068,7 +1068,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
@@ -1108,7 +1108,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
@@ -1152,7 +1152,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
+		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
 	} else {
 		if (adev->powerplay.pp_funcs->set_sclk_od)
 			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
@@ -1203,7 +1203,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
+		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
 	} else {
 		if (adev->powerplay.pp_funcs->set_mclk_od)
 			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
```
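To make the pattern in the hunks above explicit: on parts handled by the software-SMU path, such as navi10, the handlers now pass the SMU_* clock identifiers to smu_force_clk_levels(), while the legacy powerplay callback keeps the PP_* identifiers. The following stand-alone sketch mirrors that dispatch; the enums, stub functions, and values are simplified stand-ins, not the kernel's definitions.

```c
/*
 * Hedged, self-contained sketch of the dispatch pattern shown in the diff:
 * the software-SMU path takes SMU_* clock identifiers, the legacy powerplay
 * path keeps PP_*.  All names below are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum smu_clk_type { SMU_SCLK, SMU_MCLK };   /* stand-in for the SMU clock ids */
enum pp_clock_type { PP_SCLK, PP_MCLK };    /* stand-in for the powerplay clock ids */

/* Stub standing in for smu_force_clk_levels(). */
static int smu_force_clk_levels_stub(enum smu_clk_type clk, unsigned int mask)
{
	printf("sw-SMU path: clk=%d mask=0x%x\n", clk, mask);
	return 0;
}

/* Stub standing in for amdgpu_dpm_force_clock_level(). */
static int powerplay_force_clock_level_stub(enum pp_clock_type clk, unsigned int mask)
{
	printf("legacy powerplay path: clk=%d mask=0x%x\n", clk, mask);
	return 0;
}

/* Mirrors amdgpu_set_pp_dpm_sclk(): pick the back end and pass the
 * matching clock-type constant. */
static int force_sclk_levels(bool is_sw_smu, unsigned int mask)
{
	if (is_sw_smu)
		return smu_force_clk_levels_stub(SMU_SCLK, mask);
	return powerplay_force_clock_level_stub(PP_SCLK, mask);
}

int main(void)
{
	force_sclk_levels(true, 0x6);   /* sw-SMU device, e.g. navi10 */
	force_sclk_levels(false, 0x6);  /* older powerplay device */
	return 0;
}
```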