Diffstat (limited to 'drivers/gpu/drm/amd/pm')
36 files changed, 1763 insertions, 725 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 7a22aef6e59c..81e9b443ca0a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -716,6 +716,33 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) ret = smu_send_rma_reason(smu); mutex_unlock(&adev->pm.mutex); + if (adev->cper.enabled) + if (amdgpu_cper_generate_bp_threshold_record(adev)) + dev_warn(adev->dev, "failed to generate bad page threshold CPER records\n"); + + return ret; +} + +/** + * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported + * @adev: amdgpu_device pointer + * + * This function checks if the SMU supports resetting the SDMA engine. + * It returns false if the hardware does not support software SMU or + * if the feature is not supported. + */ +bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + bool ret; + + if (!is_support_sw_smu(adev)) + return false; + + mutex_lock(&adev->pm.mutex); + ret = smu_reset_sdma_is_supported(smu); + mutex_unlock(&adev->pm.mutex); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index e8ae7681bf0a..922def51685b 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -98,6 +98,85 @@ const char * const amdgpu_pp_profile_name[] = { }; /** + * amdgpu_pm_dev_state_check - Check if device can be accessed. + * @adev: Target device. + * @runpm: Check runpm status for suspend state checks. + * + * Checks the state of the @adev for access. Return 0 if the device is + * accessible or a negative error code otherwise. + */ +static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm) +{ + bool runpm_check = runpm ? adev->in_runpm : false; + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !runpm_check) + return -EPERM; + + return 0; +} + +/** + * amdgpu_pm_get_access - Check if device can be accessed, resume if needed. + * @adev: Target device. + * + * Checks the state of the @adev for access. Use runtime pm API to resume if + * needed. Return 0 if the device is accessible or a negative error code + * otherwise. + */ +static int amdgpu_pm_get_access(struct amdgpu_device *adev) +{ + int ret; + + ret = amdgpu_pm_dev_state_check(adev, true); + if (ret) + return ret; + + return pm_runtime_resume_and_get(adev->dev); +} + +/** + * amdgpu_pm_get_access_if_active - Check if device is active for access. + * @adev: Target device. + * + * Checks the state of the @adev for access. Use runtime pm API to determine + * if device is active. Allow access only if device is active. Return 0 if the + * device is accessible or a negative error code otherwise. + */ +static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev) +{ + int ret; + + /* Ignore runpm status. If device is in suspended state, deny access */ + ret = amdgpu_pm_dev_state_check(adev, false); + if (ret) + return ret; + + /* + * Allow only if device is active. If runpm is also disabled, as in + * kernels without CONFIG_PM, allow access. + */ + ret = pm_runtime_get_if_active(adev->dev); + if (!ret) + return -EPERM; + + return 0; +} +
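The helpers above centralize the reset/suspend checks and the runtime-PM get/mark-busy/put bookkeeping that every sysfs handler in this file previously open-coded. A minimal sketch of a read-only handler built on the pair, mirroring the converted handlers later in this patch (the attribute name amdgpu_get_example_attr is hypothetical; amdgpu_dpm_get_sclk_od and the helpers come from the patch itself):

static ssize_t amdgpu_get_example_attr(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	/* Fails with -EPERM during reset/suspend or while the device is idle. */
	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_sclk_od(adev);

	/* Pairs with the get above: mark busy, drop the runtime-PM reference. */
	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

+/** + * amdgpu_pm_put_access - Put to autosuspend mode after a device access. + * @adev: Target device.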
+ * + * Should be paired with amdgpu_pm_get_access* calls + */ +static inline void amdgpu_pm_put_access(struct amdgpu_device *adev) +{ + pm_runtime_mark_last_busy(adev->dev); + pm_runtime_put_autosuspend(adev->dev); +} + +/** * DOC: power_dpm_state * * The power_dpm_state file is a legacy interface and is only provided for @@ -140,18 +219,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, enum amd_pm_state_type pm; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; amdgpu_dpm_get_current_power_state(adev, &pm); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return sysfs_emit(buf, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : @@ -168,11 +242,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, enum amd_pm_state_type state; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (strncmp("battery", buf, strlen("battery")) == 0) state = POWER_STATE_TYPE_BATTERY; else if (strncmp("balanced", buf, strlen("balanced")) == 0) @@ -182,14 +251,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, else return -EINVAL; - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; amdgpu_dpm_set_power_state(adev, state); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; } @@ -263,18 +331,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, enum amd_dpm_forced_level level = 0xff; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; level = amdgpu_dpm_get_performance_level(adev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return sysfs_emit(buf, "%s\n", (level == AMD_DPM_FORCED_LEVEL_AUTO) ? 
"auto" : @@ -299,11 +362,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, enum amd_dpm_forced_level level; int ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (strncmp("low", buf, strlen("low")) == 0) { level = AMD_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { @@ -328,14 +386,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); mutex_unlock(&adev->pm.stable_pstate_ctx_lock); return -EINVAL; } @@ -343,8 +400,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, adev->pm.stable_pstate_ctx = NULL; mutex_unlock(&adev->pm.stable_pstate_ctx_lock); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; } @@ -359,19 +415,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, uint32_t i; int buf_len, ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; if (amdgpu_dpm_get_pp_num_states(adev, &data)) memset(&data, 0, sizeof(data)); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); buf_len = sysfs_emit(buf, "states: %d\n", data.nums); for (i = 0; i < data.nums; i++) @@ -394,20 +445,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, enum amd_pm_state_type pm = 0; int i = 0, ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; amdgpu_dpm_get_current_power_state(adev, &pm); ret = amdgpu_dpm_get_pp_num_states(adev, &data); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (ret) return ret; @@ -430,11 +476,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (adev->pm.pp_force_state_enabled) return amdgpu_get_pp_cur_state(dev, attr, buf); else @@ -453,11 +494,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, unsigned long idx; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - adev->pm.pp_force_state_enabled = false; if (strlen(buf) == 1) @@ -469,7 +505,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; @@ -490,14 +526,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, adev->pm.pp_force_state_enabled = true; } - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); 
return count; err_out: - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); + return ret; } @@ -521,18 +556,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, char *table = NULL; int size, ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; size = amdgpu_dpm_get_pp_table(adev, &table); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (size <= 0) return size; @@ -554,19 +584,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); int ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_set_pp_table(adev, buf, count); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (ret) return ret; @@ -735,11 +759,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, const char delimiter[3] = {' ', '\n', '\0'}; uint32_t type; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (count > 127 || count == 0) return -EINVAL; @@ -785,7 +804,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; } - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; @@ -806,14 +825,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, goto err_out; } - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; err_out: - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); + return -EINVAL; } @@ -835,14 +853,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, }; uint clk_index; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; for (clk_index = 0 ; clk_index < 6 ; clk_index++) { ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size); @@ -861,7 +874,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -892,23 +905,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, uint64_t featuremask; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - ret = kstrtou64(buf, 0, &featuremask); if (ret) return -EINVAL; - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -925,20 +932,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, ssize_t size; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && 
!adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; size = amdgpu_dpm_get_ppfeature_status(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -991,14 +993,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, int size = 0; int ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size); if (ret == -ENOENT) @@ -1007,7 +1004,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -1057,23 +1054,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, int ret; uint32_t mask = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_force_clock_level(adev, type, mask); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -1240,18 +1231,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, uint32_t value = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; value = amdgpu_dpm_get_sclk_od(adev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return sysfs_emit(buf, "%d\n", value); } @@ -1266,24 +1252,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, int ret; long int value; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - ret = kstrtol(buf, 0, &value); if (ret) return -EINVAL; - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; } @@ -1297,18 +1277,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, uint32_t value = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; value = amdgpu_dpm_get_mclk_od(adev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return sysfs_emit(buf, "%d\n", value); } @@ -1323,24 +1298,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, int ret; long int value; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - ret = kstrtol(buf, 0, &value); if (ret) return -EINVAL; - ret = pm_runtime_resume_and_get(ddev->dev); + ret = 
amdgpu_pm_get_access(adev); if (ret < 0) return ret; amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; } @@ -1378,20 +1347,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, ssize_t size; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; size = amdgpu_dpm_get_power_profile_mode(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -1414,11 +1378,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, long int profile_mode = 0; const char delimiter[3] = {' ', '\n', '\0'}; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - tmp[0] = *(buf); tmp[1] = '\0'; ret = kstrtol(tmp, 0, &profile_mode); @@ -1445,14 +1404,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (!ret) return count; @@ -1466,19 +1424,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, { int r, size = sizeof(uint32_t); - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_if_active(adev->dev); - if (r <= 0) - return r ?: -EPERM; + r = amdgpu_pm_get_access_if_active(adev); + if (r) + return r; /* get the sensor value */ r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); return r; } @@ -1576,24 +1529,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, uint64_t count0 = 0, count1 = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (adev->flags & AMD_IS_APU) return -ENODATA; if (!adev->asic_funcs->get_pcie_usage) return -ENODATA; - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; amdgpu_asic_get_pcie_usage(adev, &count0, &count1); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return sysfs_emit(buf, "%llu %llu %i\n", count0, count1, pcie_get_mps(adev->pdev)); @@ -1616,11 +1564,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (adev->unique_id) return sysfs_emit(buf, "%016llx\n", adev->unique_id); @@ -1715,9 +1658,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; ret = 
amdgpu_dpm_get_apu_thermal_limit(adev, &limit); if (!ret) @@ -1725,7 +1668,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, else size = sysfs_emit(buf, "failed to get thermal limit\n"); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -1749,20 +1692,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, return -EINVAL; } - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_set_apu_thermal_limit(adev, value); if (ret) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); dev_err(dev, "failed to update thermal limit\n"); return ret; } - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; } @@ -1786,18 +1727,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev, ssize_t size = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -1824,14 +1760,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, ssize_t size = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(ddev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); if (size <= 0) @@ -1843,7 +1774,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, memcpy(buf, gpu_metrics, size); out: - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -1939,19 +1870,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev, int r = 0; int bias = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_resume_and_get(ddev->dev); - if (r < 0) - return r; - r = kstrtoint(buf, 10, &bias); if (r) goto out; + r = amdgpu_pm_get_access(adev); + if (r < 0) + return r; + if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS) bias = AMDGPU_SMARTSHIFT_MAX_BIAS; else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS) @@ -1963,8 +1889,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev, /* TODO: update bias level with SMU message */ out: - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); + return r; } @@ -2006,10 +1932,11 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg return 0; } - /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */ + /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */ if (gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4)) { - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0)) { + if (amdgpu_sriov_multi_vf_mode(adev)) *states = ATTR_STATE_UNSUPPORTED; return 0; } @@ -2044,7 +1971,7 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_ * setting should not be allowed from VF if not in one VF mode. 
*/ if (gc_ver >= IP_VERSION(10, 0, 0) || - (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) { + (amdgpu_sriov_multi_vf_mode(adev))) { dev_attr->attr.mode &= ~S_IWUGO; dev_attr->store = NULL; } @@ -2087,7 +2014,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd gc_ver == IP_VERSION(11, 0, 2) || gc_ver == IP_VERSION(11, 0, 3) || gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4))) + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { if (!((gc_ver == IP_VERSION(10, 3, 1) || @@ -2109,7 +2037,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd gc_ver == IP_VERSION(11, 0, 2) || gc_ver == IP_VERSION(11, 0, 3) || gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4))) + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { if (!((gc_ver == IP_VERSION(10, 3, 1) || @@ -2120,7 +2049,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { if (gc_ver == IP_VERSION(9, 4, 2) || gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4)) + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0)) *states = ATTR_STATE_UNSUPPORTED; } @@ -2218,11 +2148,6 @@ static ssize_t amdgpu_get_pm_policy_attr(struct device *dev, policy_attr = container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf); } @@ -2239,11 +2164,6 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev, char *tmp, *param; long val; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - count = min(count, sizeof(tmp_buf)); memcpy(tmp_buf, buf, count); tmp_buf[count - 1] = '\0'; @@ -2269,14 +2189,13 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev, policy_attr = container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); - ret = pm_runtime_resume_and_get(ddev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (ret) return ret; @@ -2395,13 +2314,22 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ gc_ver == IP_VERSION(9, 0, 1)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(vcn_busy_percent)) { - if (!(gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 3) || - gc_ver == IP_VERSION(10, 3, 6) || - gc_ver == IP_VERSION(10, 3, 7) || - gc_ver == IP_VERSION(11, 0, 1) || - gc_ver == IP_VERSION(11, 0, 4) || - gc_ver == IP_VERSION(11, 5, 0))) + if (!(gc_ver == IP_VERSION(9, 3, 0) || + gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || + gc_ver == IP_VERSION(11, 5, 1) || + gc_ver == IP_VERSION(11, 5, 2) || + gc_ver == IP_VERSION(11, 5, 3) || + gc_ver == IP_VERSION(12, 0, 0) || + gc_ver == 
IP_VERSION(12, 0, 1))) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pcie_bw)) { /* PCIe Perf counters won't work on APU nodes */ @@ -2416,11 +2344,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ case IP_VERSION(9, 4, 2): case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): + case IP_VERSION(9, 5, 0): case IP_VERSION(10, 3, 0): case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): *states = ATTR_STATE_SUPPORTED; break; default: @@ -2699,18 +2630,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, u32 pwm_mode = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(adev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -2728,11 +2654,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, u32 pwm_mode; int value; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtoint(buf, 10, &value); if (err) return err; @@ -2746,14 +2667,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, else return -EINVAL; - ret = pm_runtime_resume_and_get(adev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -2784,16 +2704,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, u32 value; u32 pwm_mode; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtou32(buf, 10, &value); if (err) return err; - err = pm_runtime_resume_and_get(adev->dev); + err = amdgpu_pm_get_access(adev); if (err < 0) return err; @@ -2810,8 +2725,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, err = amdgpu_dpm_set_fan_speed_pwm(adev, value); out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2827,18 +2741,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, int err; u32 speed = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - err = pm_runtime_get_if_active(adev->dev); - if (err <= 0) - return err ?: -EPERM; + err = amdgpu_pm_get_access_if_active(adev); + if (err) + return err; err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2854,18 +2763,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, int err; u32 speed = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - err = pm_runtime_get_if_active(adev->dev); - if (err <= 0) - return err ?: -EPERM; + err = amdgpu_pm_get_access_if_active(adev); + if (err) + return err; err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if 
(err) return err; @@ -2915,18 +2819,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, int err; u32 rpm = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - err = pm_runtime_get_if_active(adev->dev); - if (err <= 0) - return err ?: -EPERM; + err = amdgpu_pm_get_access_if_active(adev); + if (err) + return err; err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2943,16 +2842,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, u32 value; u32 pwm_mode; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtou32(buf, 10, &value); if (err) return err; - err = pm_runtime_resume_and_get(adev->dev); + err = amdgpu_pm_get_access(adev); if (err < 0) return err; @@ -2968,8 +2862,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, err = amdgpu_dpm_set_fan_speed_rpm(adev, value); out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2985,18 +2878,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, u32 pwm_mode = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(adev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -3014,11 +2902,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, int value; u32 pwm_mode; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtoint(buf, 10, &value); if (err) return err; @@ -3030,14 +2913,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; - err = pm_runtime_resume_and_get(adev->dev); + err = amdgpu_pm_get_access(adev); if (err < 0) return err; err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return -EINVAL; @@ -3152,14 +3034,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, ssize_t size; int r; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_if_active(adev->dev); - if (r <= 0) - return r ?: -EPERM; + r = amdgpu_pm_get_access_if_active(adev); + if (r) + return r; r = amdgpu_dpm_get_power_limit(adev, &limit, pp_limit_level, power_type); @@ -3169,7 +3046,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, else size = sysfs_emit(buf, "\n"); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); return size; } @@ -3230,11 +3107,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -3245,14 +3117,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ value |= limit_type 
<< 24; - err = pm_runtime_resume_and_get(adev->dev); + err = amdgpu_pm_get_access(adev); if (err < 0) return err; err = amdgpu_dpm_set_power_limit(adev, value); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -3530,7 +3401,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* Skip crit temp on APU */ if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) || - (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) && + (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) return 0; @@ -3605,7 +3477,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */ (gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4))) && + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) && (attr == &sensor_dev_attr_in0_input.dev_attr.attr || attr == &sensor_dev_attr_in0_label.dev_attr.attr)) return 0; @@ -3613,7 +3486,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* only APUs other than gc 9,4,3 have vddnb */ if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4))) && + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) && (attr == &sensor_dev_attr_in1_input.dev_attr.attr || attr == &sensor_dev_attr_in1_label.dev_attr.attr)) return 0; @@ -3636,7 +3510,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* hotspot temperature for gc 9,4,3*/ if (gc_ver == IP_VERSION(9, 4, 3) || - gc_ver == IP_VERSION(9, 4, 4)) { + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0)) { if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr || attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || attr == &sensor_dev_attr_temp1_label.dev_attr.attr) @@ -3686,20 +3561,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev, int size = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_if_active(adev->dev); - if (ret <= 0) - return ret ?: -EPERM; + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; size = amdgpu_dpm_print_clock_levels(adev, od_type, buf); if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_put_autosuspend(adev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -3767,11 +3637,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, long parameter[64]; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - ret = parse_input_od_command_lines(in_buf, count, &cmd_type, @@ -3780,7 +3645,7 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, if (ret) return ret; - ret = pm_runtime_resume_and_get(adev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) return ret; @@ -3799,14 +3664,12 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, goto err_out; } - pm_runtime_mark_last_busy(adev->dev); - pm_runtime_put_autosuspend(adev->dev); + amdgpu_pm_put_access(adev); return count; err_out: - pm_runtime_mark_last_busy(adev->dev); - pm_runtime_put_autosuspend(adev->dev); + amdgpu_pm_put_access(adev); return 
ret; } @@ -4776,16 +4639,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags) static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct drm_device *dev = adev_to_drm(adev); u64 flags = 0; int r; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_resume_and_get(dev->dev); + r = amdgpu_pm_get_access(adev); if (r < 0) return r; @@ -4802,7 +4659,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) seq_printf(m, "\n"); out: - pm_runtime_put_autosuspend(dev->dev); + amdgpu_pm_put_access(adev); return r; } @@ -4822,10 +4679,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, void *smu_prv_buf; int ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; + ret = amdgpu_pm_dev_state_check(adev, true); + if (ret) + return ret; ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size); if (ret) diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 1f5ac7e0230d..f93d287dbf13 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -295,7 +295,8 @@ enum ip_power_state { }; /* Used to mask smu debug modes */ -#define SMU_DEBUG_HALT_ON_ERROR 0x1 +#define SMU_DEBUG_HALT_ON_ERROR BIT(0) +#define SMU_DEBUG_POOL_USE_VRAM BIT(1) #define MAX_SMU_I2C_BUSES 2 @@ -603,5 +604,6 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type, ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, enum pp_pm_policy p_type, char *buf); int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask); +bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index e237ea1185a7..59fae668dc3f 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -3107,7 +3107,7 @@ static int kv_dpm_resume(struct amdgpu_ip_block *ip_block) return ret; } -static bool kv_dpm_is_idle(void *handle) +static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block) { return true; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index d6dfe2599ebe..1c25f3023e93 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7852,7 +7852,7 @@ static int si_dpm_resume(struct amdgpu_ip_block *ip_block) return ret; } -static bool si_dpm_is_idle(void *handle) +static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block) { /* XXX */ return true; diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 686345f75f26..b48a031cbba0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->device = amdgpu_cgs_create_device(adev); + if (!hwmgr->device) { + kfree(hwmgr); + return -ENOMEM; + } + mutex_init(&hwmgr->msg_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; @@ -239,7 +244,7 @@ static void pp_late_fini(struct amdgpu_ip_block *ip_block) } -static bool pp_is_idle(void 
*handle) +static bool pp_is_idle(struct amdgpu_ip_block *ip_block) { return false; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c index 90452b66e107..a59677cf8dfc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c @@ -149,16 +149,6 @@ int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr) return 0; } -int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) -{ - PHM_FUNC_CHECK(hwmgr); - - if (hwmgr->hwmgr_func->powerdown_uvd != NULL) - return hwmgr->hwmgr_func->powerdown_uvd(hwmgr); - return 0; -} - - int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c index 82d540334318..6120f14caab0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c @@ -158,84 +158,6 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, return result; } - -static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table( - struct pp_hwmgr *hwmgr) -{ - const void *table_address; - uint16_t idx; - - idx = GetIndexIntoMasterDataTable(gpio_pin_lut); - table_address = smu_atom_get_data_table(hwmgr->adev, - idx, NULL, NULL, NULL); - PP_ASSERT_WITH_CODE(table_address, - "Error retrieving BIOS Table Address!", - return NULL); - - return (struct atom_gpio_pin_lut_v2_1 *)table_address; -} - -static bool pp_atomfwctrl_lookup_gpio_pin( - struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table, - const uint32_t pin_id, - struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment) -{ - unsigned int size = le16_to_cpu( - gpio_lookup_table->table_header.structuresize); - unsigned int offset = - offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]); - unsigned long start = (unsigned long)gpio_lookup_table; - - while (offset < size) { - const struct atom_gpio_pin_assignment *pin_assignment = - (const struct atom_gpio_pin_assignment *)(start + offset); - - if (pin_id == pin_assignment->gpio_id) { - gpio_pin_assignment->uc_gpio_pin_bit_shift = - pin_assignment->gpio_bitshift; - gpio_pin_assignment->us_gpio_pin_aindex = - le16_to_cpu(pin_assignment->data_a_reg_index); - return true; - } - offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1; - } - return false; -} - -/* - * Returns TRUE if the given pin id find in lookup table. - */ -bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, - const uint32_t pin_id, - struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment) -{ - bool ret = false; - struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table = - pp_atomfwctrl_get_gpio_lookup_table(hwmgr); - - /* If we cannot find the table do NOT try to control this voltage. */ - PP_ASSERT_WITH_CODE(gpio_lookup_table, - "Could not find GPIO lookup Table in BIOS.", - return false); - - ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table, - pin_id, gpio_pin_assignment); - - return ret; -} - -/* - * Enter to SelfRefresh mode. - * @param hwmgr - */ -int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr) -{ - /* 0 - no action - * 1 - leave power to video memory always on - */ - return 0; -} - /** pp_atomfwctrl_get_gpu_pll_dividers_vega10(). 
* * @param hwmgr input parameter: pointer to HwMgr diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h index e86e05c786d9..0d62903d5676 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h @@ -217,9 +217,6 @@ struct pp_atomfwctrl_smc_dpm_parameters { int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, uint32_t clock_type, uint32_t clock_value, struct pp_atomfwctrl_clock_dividers_soc15 *dividers); -int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr); -bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pin_id, - struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment); int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, struct pp_atomfwctrl_voltage_table *voltage_table); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index a8c732e07006..9a821563bc8e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1642,7 +1642,6 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .apply_state_adjust_rules = smu10_apply_state_adjust_rules, .force_dpm_level = smu10_dpm_force_dpm_level, .get_power_state_size = smu10_get_power_state_size, - .powerdown_uvd = NULL, .powergate_uvd = smu10_powergate_vcn, .powergate_vce = NULL, .get_mclk = smu10_dpm_get_mclk, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c index f2bda3bcbbde..5e4c80f7b20a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c @@ -55,7 +55,7 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) return smu7_enable_disable_vce_dpm(hwmgr, !bgate); } -int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) +static int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h index fc8f8a6acc72..e56abbadc78b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h @@ -28,7 +28,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr); int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 632a25957477..8da882c51856 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5754,7 +5754,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .patch_boot_state = smu7_dpm_patch_boot_state, .get_pp_table_entry = smu7_get_pp_table_entry, .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, - .powerdown_uvd = smu7_powerdown_uvd, .powergate_uvd = smu7_powergate_uvd, .powergate_vce = smu7_powergate_vce, .disable_clock_power_gating = 
smu7_disable_clock_power_gating, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index 7e1197420873..9d3b33446adc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -2044,7 +2044,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = { .apply_state_adjust_rules = smu8_apply_state_adjust_rules, .force_dpm_level = smu8_dpm_force_dpm_level, .get_power_state_size = smu8_get_power_state_size, - .powerdown_uvd = smu8_dpm_powerdown_uvd, .powergate_uvd = smu8_dpm_powergate_uvd, .powergate_vce = smu8_dpm_powergate_vce, .powergate_acp = smu8_dpm_powergate_acp, diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h index f4f9a104d170..915f1b8e4dba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h @@ -396,7 +396,6 @@ struct phm_odn_clock_levels { }; extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); -extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 227bf0e84a13..c661185753b4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -257,7 +257,6 @@ struct pp_hwmgr_func { int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr, unsigned long, struct pp_power_state *); int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); - int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index ed9dac00ebfb..033c3229b555 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -694,6 +694,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) renoir_set_ppt_funcs(smu); break; case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): vangogh_set_ppt_funcs(smu); break; case IP_VERSION(13, 0, 1): @@ -739,6 +740,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): case IP_VERSION(14, 0, 4): + case IP_VERSION(14, 0, 5): smu_v14_0_0_set_ppt_funcs(smu); break; case IP_VERSION(14, 0, 2): @@ -1025,7 +1027,10 @@ static int smu_alloc_memory_pool(struct smu_context *smu) memory_pool->size = pool_size; memory_pool->align = PAGE_SIZE; - memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; + memory_pool->domain = + (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ? 
+ AMDGPU_GEM_DOMAIN_VRAM : + AMDGPU_GEM_DOMAIN_GTT; switch (pool_size) { case SMU_MEMORY_POOL_SIZE_256_MB: @@ -1252,26 +1257,11 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu) } } -static bool smu_is_workload_profile_available(struct smu_context *smu, - u32 profile) -{ - if (profile >= PP_SMC_POWER_PROFILE_COUNT) - return false; - return smu->workload_map && smu->workload_map[profile].valid_mapping; -} - static void smu_init_power_profile(struct smu_context *smu) { - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) { - if (smu->is_apu || - !smu_is_workload_profile_available( - smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) - smu->power_profile_mode = - PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - else - smu->power_profile_mode = - PP_SMC_POWER_PROFILE_FULLSCREEN3D; - } + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) + smu->power_profile_mode = + PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu_power_profile_mode_get(smu, smu->power_profile_mode); } @@ -1580,6 +1570,7 @@ static int smu_smc_hw_setup(struct smu_context *smu) case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): case IP_VERSION(11, 0, 12): if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); @@ -1826,7 +1817,7 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { + if (amdgpu_sriov_multi_vf_mode(adev)) { smu->pm_enabled = false; return 0; } @@ -1933,6 +1924,7 @@ static int smu_disable_dpms(struct smu_context *smu) case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): return 0; @@ -2049,18 +2041,18 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block) struct smu_context *smu = adev->powerplay.pp_handle; int i, ret; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return 0; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { smu_dpm_set_vcn_enable(smu, false, i); + adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE; + } smu_dpm_set_jpeg_enable(smu, false); + adev->jpeg.cur_state = AMD_PG_STATE_GATE; smu_dpm_set_vpe_enable(smu, false); smu_dpm_set_umsch_mm_enable(smu, false); - adev->vcn.cur_state = AMD_PG_STATE_GATE; - adev->jpeg.cur_state = AMD_PG_STATE_GATE; - if (!smu->pm_enabled) return 0; @@ -2117,7 +2109,7 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block) int ret; uint64_t count; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return 0; if (!smu->pm_enabled) @@ -2153,7 +2145,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return 0; if (!smu->pm_enabled) @@ -2326,7 +2318,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, if (smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { - dev_err(smu->adev->dev, "Failed to set performance level!"); + if (ret == -EOPNOTSUPP) + dev_info(smu->adev->dev, "set performance level %d not supported", + level); + else + dev_err(smu->adev->dev, "Failed 
to set performance level %d", + level); return ret; } @@ -2802,6 +2799,7 @@ int smu_get_power_limit(void *handle, switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 12): case IP_VERSION(13, 0, 14): case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): @@ -3917,6 +3915,23 @@ int smu_send_rma_reason(struct smu_context *smu) return ret; } +/** + * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU + * @smu: smu_context pointer + * + * This function checks if the SMU supports resetting the SDMA engine. + * It returns true if supported, false otherwise. + */ +bool smu_reset_sdma_is_supported(struct smu_context *smu) +{ + bool ret = false; + + if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported) + ret = smu->ppt_funcs->reset_sdma_is_supported(smu); + + return ret; +} + int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) { int ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 3630593bce61..3ba169639f54 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1376,6 +1376,10 @@ struct pptable_funcs { * @reset_sdma: message SMU to soft reset sdma instance. */ int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask); + /** + * @reset_sdma_is_supported: Check if SDMA engine reset is supported. + */ + bool (*reset_sdma_is_supported)(struct smu_context *smu); /** * @get_ecc_table: message SMU to get ECC INFO table. @@ -1637,6 +1641,7 @@ int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size); int smu_send_rma_reason(struct smu_context *smu); int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask); +bool smu_reset_sdma_is_supported(struct smu_context *smu); int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, int level); ssize_t smu_get_pm_policy_info(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h new file mode 100644 index 000000000000..d7505cfc433a --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -0,0 +1,281 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef SMU_13_0_12_PMFW_H +#define SMU_13_0_12_PMFW_H + +#define NUM_VCLK_DPM_LEVELS 4 +#define NUM_DCLK_DPM_LEVELS 4 +#define NUM_SOCCLK_DPM_LEVELS 4 +#define NUM_LCLK_DPM_LEVELS 4 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 4 +#define NUM_XGMI_DPM_LEVELS 2 +#define NUM_CXL_BITRATES 4 +#define NUM_PCIE_BITRATES 4 +#define NUM_XGMI_BITRATES 4 +#define NUM_XGMI_WIDTHS 3 +#define NUM_TDP_GROUPS 4 +#define NUM_SOC_P2S_TABLES 6 +#define NUM_GFX_P2S_TABLES 8 +#define NUM_PSM_DIDT_THRESHOLDS 3 +#define NUM_XVMIN_VMIN_THRESHOLDS 3 + +#define PRODUCT_MODEL_NUMBER_LEN 20 +#define PRODUCT_NAME_LEN 64 +#define PRODUCT_SERIAL_LEN 20 +#define PRODUCT_MANUFACTURER_NAME_LEN 32 +#define PRODUCT_FRU_ID_LEN 32 + +typedef enum { +/*0*/ FEATURE_DATA_CALCULATION = 0, +/*1*/ FEATURE_DPM_FCLK = 1, +/*2*/ FEATURE_DPM_GFXCLK = 2, +/*3*/ FEATURE_DPM_LCLK = 3, +/*4*/ FEATURE_DPM_SOCCLK = 4, +/*5*/ FEATURE_DPM_UCLK = 5, +/*6*/ FEATURE_DPM_VCN = 6, +/*7*/ FEATURE_DPM_XGMI = 7, +/*8*/ FEATURE_DS_FCLK = 8, +/*9*/ FEATURE_DS_GFXCLK = 9, +/*10*/ FEATURE_DS_LCLK = 10, +/*11*/ FEATURE_DS_MP0CLK = 11, +/*12*/ FEATURE_DS_MP1CLK = 12, +/*13*/ FEATURE_DS_MPIOCLK = 13, +/*14*/ FEATURE_DS_SOCCLK = 14, +/*15*/ FEATURE_DS_VCN = 15, +/*16*/ FEATURE_APCC_DFLL = 16, +/*17*/ FEATURE_APCC_PLUS = 17, +/*18*/ FEATURE_PPT = 18, +/*19*/ FEATURE_TDC = 19, +/*20*/ FEATURE_THERMAL = 20, +/*21*/ FEATURE_SOC_PCC = 21, +/*22*/ FEATURE_PROCHOT = 22, +/*23*/ FEATURE_FDD_AID_HBM = 23, +/*24*/ FEATURE_FDD_AID_SOC = 24, +/*25*/ FEATURE_FDD_XCD_EDC = 25, +/*26*/ FEATURE_FDD_XCD_XVMIN = 26, +/*27*/ FEATURE_FW_CTF = 27, +/*28*/ FEATURE_SMU_CG = 28, +/*29*/ FEATURE_PSI7 = 29, +/*30*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 30, +/*31*/ FEATURE_SOC_DC_RTC = 31, +/*32*/ FEATURE_GFX_DC_RTC = 32, +/*33*/ FEATURE_DVM_MIN_PSM = 33, +/*34*/ FEATURE_PRC = 34, +/*35*/ FEATURE_PSM_SQ_THROTTLER = 35, +/*36*/ FEATURE_PIT = 36, +/*37*/ FEATURE_DVO = 37, +/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, + +/*39*/ NUM_FEATURES = 39 +} FEATURE_LIST_e; + +//enum for MPIO PCIe gen speed msgs +typedef enum { + PCIE_LINK_SPEED_INDEX_TABLE_RESERVED, + PCIE_LINK_SPEED_INDEX_TABLE_GEN1, + PCIE_LINK_SPEED_INDEX_TABLE_GEN2, + PCIE_LINK_SPEED_INDEX_TABLE_GEN3, + PCIE_LINK_SPEED_INDEX_TABLE_GEN4, + PCIE_LINK_SPEED_INDEX_TABLE_GEN5, + PCIE_LINK_SPEED_INDEX_TABLE_COUNT +} PCIE_LINK_SPEED_INDEX_TABLE_e; + +typedef enum { + GFX_GUARDBAND_OFFSET_0, + GFX_GUARDBAND_OFFSET_1, + GFX_GUARDBAND_OFFSET_2, + GFX_GUARDBAND_OFFSET_3, + GFX_GUARDBAND_OFFSET_4, + GFX_GUARDBAND_OFFSET_5, + GFX_GUARDBAND_OFFSET_6, + GFX_GUARDBAND_OFFSET_7, + GFX_GUARDBAND_OFFSET_COUNT +} GFX_GUARDBAND_OFFSET_e; + +typedef enum { + GFX_DVM_MARGINHI_0, + GFX_DVM_MARGINHI_1, + GFX_DVM_MARGINHI_2, + GFX_DVM_MARGINHI_3, + GFX_DVM_MARGINHI_4, + GFX_DVM_MARGINHI_5, + GFX_DVM_MARGINHI_6, + GFX_DVM_MARGINHI_7, + GFX_DVM_MARGINLO_0, + GFX_DVM_MARGINLO_1, + GFX_DVM_MARGINLO_2, + GFX_DVM_MARGINLO_3, + GFX_DVM_MARGINLO_4, + GFX_DVM_MARGINLO_5, + GFX_DVM_MARGINLO_6, + GFX_DVM_MARGINLO_7, + GFX_DVM_MARGIN_COUNT +} GFX_DVM_MARGIN_e; + +#define SMU_METRICS_TABLE_VERSION 0x12 + +typedef struct __attribute__((packed, aligned(4))) { + uint64_t AccumulationCounter; + + //TEMPERATURE + uint32_t MaxSocketTemperature; + uint32_t MaxVrTemperature; + uint32_t MaxHbmTemperature; + uint64_t MaxSocketTemperatureAcc; + uint64_t MaxVrTemperatureAcc; + uint64_t MaxHbmTemperatureAcc; + + //POWER + uint32_t SocketPowerLimit; + uint32_t SocketPower; + + //ENERGY + uint64_t Timestamp; + uint64_t SocketEnergyAcc; + uint64_t XcdEnergyAcc; + 
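// "Acc"-suffixed fields below are free-running accumulators maintained by + // the PMFW; SocketEnergyAcc, for example, is consumed by the driver in + // 15.259 uJ (2^-16 J) units (see smu_v13_0_12_get_gpu_metrics() later in + // this patch). +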
uint64_t AidEnergyAcc; + uint64_t HbmEnergyAcc; + + //FREQUENCY + uint32_t GfxclkFrequencyLimit; + uint32_t FclkFrequency; + uint32_t UclkFrequency; + uint32_t SocclkFrequency[4]; + uint32_t VclkFrequency[4]; + uint32_t DclkFrequency[4]; + uint32_t LclkFrequency[4]; + uint64_t GfxclkFrequencyAcc[8]; + + //FREQUENCY RANGE + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + + //XGMI + uint32_t XgmiWidth; + uint32_t XgmiBitrate; + uint64_t XgmiReadBandwidthAcc[8]; + uint64_t XgmiWriteBandwidthAcc[8]; + + //ACTIVITY + uint32_t SocketGfxBusy; + uint32_t DramBandwidthUtilization; + uint64_t SocketGfxBusyAcc; + uint64_t DramBandwidthAcc; + uint32_t MaxDramBandwidth; + uint64_t DramBandwidthUtilizationAcc; + uint64_t PcieBandwidthAcc[4]; + + //THROTTLERS + uint32_t ProchotResidencyAcc; + uint32_t PptResidencyAcc; + uint32_t SocketThmResidencyAcc; + uint32_t VrThmResidencyAcc; + uint32_t HbmThmResidencyAcc; + uint32_t GfxLockXCDMak; + + // New Items at end to maintain driver compatibility + uint32_t GfxclkFrequency[8]; + + //XGMI Data transfer size + uint64_t XgmiReadDataSizeAcc[8];//in KByte + uint64_t XgmiWriteDataSizeAcc[8];//in KByte + + //PCIE BW Data and error count + uint32_t PcieBandwidth[4]; + uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated + + // VCN/JPEG ACTIVITY + uint32_t VcnBusy[4]; + uint32_t JpegBusy[40]; + + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated + + //Total App Clock Counter + uint64_t GfxclkBelowHostLimitPptAcc[8]; + uint64_t GfxclkBelowHostLimitThmAcc[8]; + uint64_t GfxclkBelowHostLimitTotalAcc[8]; + uint64_t GfxclkLowUtilizationAcc[8]; +} MetricsTable_t; + +#define SMU_VF_METRICS_TABLE_MASK (1 << 31) +#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK) + +typedef struct __attribute__((packed, aligned(4))) { + uint32_t AccumulationCounter; + uint32_t InstGfxclk_TargFreq; + uint64_t AccGfxclk_TargFreq; + uint64_t AccGfxRsmuDpm_Busy; + uint64_t AccGfxclkBelowHostLimitPpt; + uint64_t AccGfxclkBelowHostLimitThm; + uint64_t AccGfxclkBelowHostLimitTotal; + uint64_t AccGfxclkLowUtilization; +} VfMetricsTable_t; + +/* FRU product information */ +typedef struct __attribute__((packed, aligned(4))) { + uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN]; + uint8_t Name[PRODUCT_NAME_LEN]; + uint8_t Serial[PRODUCT_SERIAL_LEN]; + uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN]; + uint8_t FruId[PRODUCT_FRU_ID_LEN]; +} FRUProductInfo_t; + +#pragma pack(push, 4) +typedef struct { + //FRU PRODUCT INFO + FRUProductInfo_t ProductInfo; + + //POWER + uint32_t MaxSocketPowerLimit; + + //FREQUENCY RANGE + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + + //PSNs + uint64_t PublicSerialNumber_AID[4]; + uint64_t PublicSerialNumber_XCD[8]; +} StaticMetricsTable_t; +#pragma pack(pop) + +#endif diff --git
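Many of the metrics fields above (frequencies, temperatures, busy percentages) are Q10.10 fixed-point values; the driver converts them with the SMUQ10_* helpers this patch adds to smu_v13_0.h. A minimal standalone sketch of that conversion, with the macros copied verbatim from that hunk (the sample value is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Copied from the smu_v13_0.h hunk in this patch. */
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

int main(void)
{
	uint32_t q10 = (1175u << 10) | 0x300;	/* 1175.75 in Q10.10 */

	/* Prints "1175 + 768/1024 rounds to 1176": the fractional part is
	 * compared against 0x200 (0.5), so .75 rounds up to the next integer. */
	printf("%u + %u/1024 rounds to %u\n", SMUQ10_TO_UINT(q10),
	       SMUQ10_FRAC(q10), SMUQ10_ROUND(q10));
	return 0;
}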
a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h new file mode 100644 index 000000000000..e1f490b6ce64 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h @@ -0,0 +1,143 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU_13_0_12_PPSMC_H +#define SMU_13_0_12_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GfxDriverReset 0x3 +#define PPSMC_MSG_GetDriverIfVersion 0x4 +#define PPSMC_MSG_EnableAllSmuFeatures 0x5 +#define PPSMC_MSG_DisableAllSmuFeatures 0x6 +#define PPSMC_MSG_RequestI2cTransaction 0x7 +#define PPSMC_MSG_GetMetricsVersion 0x8 +#define PPSMC_MSG_GetMetricsTable 0x9 +#define PPSMC_MSG_GetEccInfoTable 0xA +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC +#define PPSMC_MSG_SetDriverDramAddrHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrLow 0xE +#define PPSMC_MSG_SetToolsDramAddrHigh 0xF +#define PPSMC_MSG_SetToolsDramAddrLow 0x10 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12 +#define PPSMC_MSG_SetSoftMinByFreq 0x13 +#define PPSMC_MSG_SetSoftMaxByFreq 0x14 +#define PPSMC_MSG_GetMinDpmFreq 0x15 +#define PPSMC_MSG_GetMaxDpmFreq 0x16 +#define PPSMC_MSG_GetDpmFreqByIndex 0x17 +#define PPSMC_MSG_SetPptLimit 0x18 +#define PPSMC_MSG_GetPptLimit 0x19 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A +#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B +#define PPSMC_MSG_DramLogSetDramSize 0x1C +#define PPSMC_MSG_GetDebugData 0x1D +#define PPSMC_MSG_HeavySBR 0x1E +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F +#define PPSMC_MSG_DFCstateControl 0x20 +#define PPSMC_MSG_GetGmiPwrDnHyst 0x21 +#define PPSMC_MSG_SetGmiPwrDnHyst 0x22 +#define PPSMC_MSG_GmiPwrDnControl 0x23 +#define PPSMC_MSG_EnterGfxoff 0x24 +#define PPSMC_MSG_ExitGfxoff 0x25 +#define PPSMC_MSG_EnableDeterminism 0x26 +#define PPSMC_MSG_DisableDeterminism 0x27 +#define PPSMC_MSG_DumpSTBtoDram 0x28 +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29 +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A +#define PPSMC_MSG_STBtoDramLogSetDramSize 
0x2B +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D +#define PPSMC_MSG_GfxDriverResetRecovery 0x2E +#define PPSMC_MSG_TriggerVFFLR 0x2F +#define PPSMC_MSG_SetSoftMinGfxClk 0x30 +#define PPSMC_MSG_SetSoftMaxGfxClk 0x31 +#define PPSMC_MSG_GetMinGfxDpmFreq 0x32 +#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33 +#define PPSMC_MSG_PrepareForDriverUnload 0x34 +#define PPSMC_MSG_ReadThrottlerLimit 0x35 +#define PPSMC_MSG_QueryValidMcaCount 0x36 +#define PPSMC_MSG_McaBankDumpDW 0x37 +#define PPSMC_MSG_GetCTFLimit 0x38 +#define PPSMC_MSG_ClearMcaOnRead 0x39 +#define PPSMC_MSG_QueryValidMcaCeCount 0x3A +#define PPSMC_MSG_McaBankCeDumpDW 0x3B +#define PPSMC_MSG_SelectPLPDMode 0x40 +#define PPSMC_MSG_PmLogReadSample 0x41 +#define PPSMC_MSG_PmLogGetTableVersion 0x42 +#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 +#define PPSMC_MSG_SetThrottlingPolicy 0x44 +#define PPSMC_MSG_SetPhaseDetectCSBWThreshold 0x45 +#define PPSMC_MSG_SetPhaseDetectFreqHigh 0x46 +#define PPSMC_MSG_SetPhaseDetectFreqLow 0x47 +#define PPSMC_MSG_SetPhaseDetectDownHysterisis 0x48 +#define PPSMC_MSG_SetPhaseDetectAlphaX1e6 0x49 +#define PPSMC_MSG_SetPhaseDetectOnOff 0x4A +#define PPSMC_MSG_GetPhaseDetectResidency 0x4B +#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C +#define PPSMC_MSG_ResetSDMA 0x4D +#define PPSMC_MSG_GetRasTableVersion 0x4E +#define PPSMC_MSG_GetRmaStatus 0x4F +#define PPSMC_MSG_GetErrorCount 0x50 +#define PPSMC_MSG_GetBadPageCount 0x51 +#define PPSMC_MSG_GetBadPageInfo 0x52 +#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53 +#define PPSMC_MSG_SetTimestampLoHi 0x54 +#define PPSMC_MSG_GetTimestampLoHi 0x55 +#define PPSMC_MSG_GetRasPolicy 0x56 +#define PPSMC_MSG_DumpErrorRecord 0x57 +#define PPSMC_MSG_EraseRasTable 0x58 +#define PPSMC_MSG_GetStaticMetricsTable 0x59 +#define PPSMC_Message_Count 0x5A + +//PPSMC Reset Types for driver msg argument +#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 +#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2 +#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3 + +//PPSMC Throttling Limit Types for driver msg argument +#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1 +#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2 + +//CTF/Throttle Limit types +#define PPSMC_AID_THM_TYPE 0x1 +#define PPSMC_CCD_THM_TYPE 0x2 +#define PPSMC_XCD_THM_TYPE 0x3 +#define PPSMC_HBM_THM_TYPE 0x4 + +//PLPD modes +#define PPSMC_PLPD_MODE_DEFAULT 0x1 +#define PPSMC_PLPD_MODE_OPTIMIZED 0x2 + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_MSG; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 274b3e1cc4fb..f8ed45857878 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -129,6 +129,7 @@ typedef enum { #define SMU_METRICS_TABLE_VERSION 0xF +// Unified metrics table for smu_v13_0_6 typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -241,8 +242,9 @@ typedef struct __attribute__((packed, aligned(4))) { //Total App Clock Counter uint64_t GfxclkBelowHostLimitAcc[8]; -} MetricsTableX_t; +} MetricsTableV0_t; +// Metrics table for smu_v13_0_6 APUs typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -333,7 +335,116 @@ typedef struct __attribute__((packed, aligned(4))) { // VCN/JPEG ACTIVITY uint32_t VcnBusy[4]; uint32_t JpegBusy[32]; -} MetricsTableA_t; +} MetricsTableV1_t; + +// Metrics table for smu_v13_0_12 typedef struct
__attribute__((packed, aligned(4))) { + uint64_t AccumulationCounter; + + //TEMPERATURE + uint32_t MaxSocketTemperature; + uint32_t MaxVrTemperature; + uint32_t MaxHbmTemperature; + uint64_t MaxSocketTemperatureAcc; + uint64_t MaxVrTemperatureAcc; + uint64_t MaxHbmTemperatureAcc; + + //POWER + uint32_t SocketPowerLimit; + uint32_t MaxSocketPowerLimit; + uint32_t SocketPower; + + //ENERGY + uint64_t Timestamp; + uint64_t SocketEnergyAcc; + uint64_t CcdEnergyAcc; + uint64_t XcdEnergyAcc; + uint64_t AidEnergyAcc; + uint64_t HbmEnergyAcc; + + //FREQUENCY + uint32_t GfxclkFrequencyLimit; + uint32_t FclkFrequency; + uint32_t UclkFrequency; + uint32_t SocclkFrequency[4]; + uint32_t VclkFrequency[4]; + uint32_t DclkFrequency[4]; + uint32_t LclkFrequency[4]; + uint64_t GfxclkFrequencyAcc[8]; + + //FREQUENCY RANGE + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + + //XGMI + uint32_t XgmiWidth; + uint32_t XgmiBitrate; + uint64_t XgmiReadBandwidthAcc[8]; + uint64_t XgmiWriteBandwidthAcc[8]; + + //ACTIVITY + uint32_t SocketGfxBusy; + uint32_t DramBandwidthUtilization; + uint64_t SocketC0ResidencyAcc; + uint64_t SocketGfxBusyAcc; + uint64_t DramBandwidthAcc; + uint32_t MaxDramBandwidth; + uint64_t DramBandwidthUtilizationAcc; + uint64_t PcieBandwidthAcc[4]; + + //THROTTLERS + uint32_t ProchotResidencyAcc; + uint32_t PptResidencyAcc; + uint32_t SocketThmResidencyAcc; + uint32_t VrThmResidencyAcc; + uint32_t HbmThmResidencyAcc; + uint32_t GfxLockXCDMak; + + // New Items at end to maintain driver compatibility + uint32_t GfxclkFrequency[8]; + + //PSNs + uint64_t PublicSerialNumber_AID[4]; + uint64_t PublicSerialNumber_XCD[8]; + + //XGMI Data transfer size + uint64_t XgmiReadDataSizeAcc[8];//in KByte + uint64_t XgmiWriteDataSizeAcc[8];//in KByte + + //PCIE BW Data and error count + uint32_t PcieBandwidth[4]; + uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated + + // VCN/JPEG ACTIVITY + uint32_t VcnBusy[4]; + uint32_t JpegBusy[32]; + + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated + + //Total App Clock Counter + uint64_t GfxclkBelowHostLimitAcc[8]; +} MetricsTableV2_t; #define SMU_VF_METRICS_TABLE_VERSION 0x5 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index 147bfb12fd75..288b2576432b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -92,7 +92,7 @@ #define PPSMC_MSG_McaBankCeDumpDW 0x3B #define PPSMC_MSG_SelectPLPDMode 0x40 #define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 -#define PPSMC_MSG_SelectPstatePolicy 0x44 +#define PPSMC_MSG_SetThrottlingPolicy 0x44 #define PPSMC_MSG_ResetSDMA 0x4D #define
PPSMC_Message_Count 0x4E diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index e4cd6a0d13da..c9dee09395e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -273,10 +273,11 @@ __SMU_DUMMY_MAP(GetMetricsVersion), \ __SMU_DUMMY_MAP(EnableUCLKShadow), \ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \ - __SMU_DUMMY_MAP(SelectPstatePolicy), \ + __SMU_DUMMY_MAP(SetThrottlingPolicy), \ __SMU_DUMMY_MAP(MALLPowerController), \ __SMU_DUMMY_MAP(MALLPowerState), \ - __SMU_DUMMY_MAP(ResetSDMA), + __SMU_DUMMY_MAP(ResetSDMA), \ + __SMU_DUMMY_MAP(GetStaticMetricsTable), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -355,6 +356,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(DS_FCLK), \ __SMU_DUMMY_MAP(DS_MP1CLK), \ __SMU_DUMMY_MAP(DS_MP0CLK), \ + __SMU_DUMMY_MAP(DS_MPIOCLK), \ __SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \ __SMU_DUMMY_MAP(DPM_GFX_PACE), \ __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \ @@ -451,7 +453,8 @@ enum smu_clk_type { __SMU_DUMMY_MAP(APT_PF_DCS), \ __SMU_DUMMY_MAP(GFX_EDC_XVMIN), \ __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \ - __SMU_DUMMY_MAP(FAN_ABNORMAL), + __SMU_DUMMY_MAP(FAN_ABNORMAL), \ + __SMU_DUMMY_MAP(PIT), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 8d4a96e23326..cd03caffe317 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -53,6 +53,10 @@ #define SMU_13_VCLK_SHIFT 16 +#define SMUQ10_TO_UINT(x) ((x) >> 10) +#define SMUQ10_FRAC(x) ((x) & 0x3ff) +#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) + extern const int pmfw_decoded_link_speed[5]; extern const int pmfw_decoded_link_width[7]; @@ -306,5 +310,14 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, uint32_t *value); void smu_v13_0_interrupt_work(struct smu_context *smu); +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); +int smu_v13_0_12_get_max_metrics_size(void); +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value); +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table); +extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; +extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 189c6a32b6bd..78391d8f35a9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -227,6 +227,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; break; case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; break; case IP_VERSION(11, 0, 12): @@ -471,10 +472,11 @@ int smu_v11_0_init_power(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct smu_power_context *smu_power = &smu->smu_power; - size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) == - IP_VERSION(11, 5, 0) ? 
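/* v11.5.2 follows the Vangogh (v11.5.0) paths throughout this patch (fw
 * version check, display count, gfxoff control), so it also gets the larger
 * smu_11_5_power_context here. */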
- sizeof(struct smu_11_5_power_context) : - sizeof(struct smu_11_0_power_context); + u32 ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + size_t size = ((ip_version == IP_VERSION(11, 5, 0)) || + (ip_version == IP_VERSION(11, 5, 2))) ? + sizeof(struct smu_11_5_power_context) : + sizeof(struct smu_11_0_power_context); smu_power->power_context = kzalloc(size, GFP_KERNEL); if (!smu_power->power_context) @@ -731,6 +733,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) */ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) || amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2) || amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) || amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13)) return 0; @@ -1110,6 +1113,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 37d82a71a2d7..9481f897432d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1285,6 +1285,12 @@ static int renoir_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = renoir_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_EDGE_TEMP: ret = renoir_get_smu_metrics_data(smu, METRICS_TEMPERATURE_EDGE, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile index 7f3493b6c53c..51f1fa9789ab 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -24,7 +24,7 @@ # It provides the smu management services for the driver. 
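# smu_v13_0_12_ppt.o is appended to this list below so the SMU v13.0.12
# power-play table support added by this patch is built into the swsmu layer.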
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \ - smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o + smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o smu_v13_0_12_ppt.o AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index fbbdfa54f6a2..0915d6377613 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -267,10 +267,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; - if (smu->is_apu || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14)) - adev->pm.fw_version = smu_version; + adev->pm.fw_version = smu_version; /* only for dGPU w/ SMU13*/ if (adev->pm.fw) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 0551a3311217..5a9711e8cf68 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -126,7 +126,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), - MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -140,7 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), - MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), @@ -149,7 +149,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), - MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 0), MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), @@ -836,6 +836,10 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_MEMACTIVITY: *value = metrics->AverageUclkActivity; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->Vcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_SOCKETPOWER: *value = metrics->AverageSocketPower << 8; break; @@ -962,6 +966,12 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case 
AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_0_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -3234,4 +3244,9 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) smu->workload_map = smu_v13_0_0_workload_map; smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION; smu_v13_0_0_set_smu_mailbox_registers(smu); + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(smu->adev)) + smu->adev->pm.pp_feature &= ~PP_GFXOFF_MASK; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c new file mode 100644 index 000000000000..238bd71baa6d --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -0,0 +1,490 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "smu_v13_0_12_pmfw.h" +#include "smu_v13_0_6_ppt.h" +#include "smu_v13_0_12_ppsmc.h" +#include "smu_v13_0.h" +#include "amdgpu_xgmi.h" +#include "amdgpu_fru_eeprom.h" +#include <linux/pci.h> +#include "smu_cmn.h" + +#undef MP1_Public +#undef smnMP1_FIRMWARE_FLAGS + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. 
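+ * With more than one GPU in a system, the dev_* variants prefix each + * message with the device name, so output can be attributed correctly.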
+ */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \ + [smu_feature] = { 1, (smu_13_0_12_feature) } + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE \ + (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ + FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK)) + +#define NUM_JPEG_RINGS_FW 10 +#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ + (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4) + +const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = { + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_SOC_PCC), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), +}; + +// clang-format off +const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), + MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + 
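/* The third MSG_MAP() field carries per-message flags; SMU_MSG_RAS_PRI + * marks RAS-critical traffic (the MCA count queries and bank dumps, plus + * GfxDeviceDriverReset above) so the common layer can prioritize those + * messages during RAS handling. */ +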
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0), + MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0), + MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0), + MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0), + MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0), + MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0), + MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0), + MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0), + MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), + MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), + MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), + MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), + MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), + MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), + MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), + MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), + MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), + MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), +}; + +static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu, + uint64_t *feature_mask) +{ + int ret; + + ret = smu_cmn_get_enabled_mask(smu, feature_mask); + + if (ret == -EIO) { + *feature_mask = 0; + ret = 0; + } + + return ret; +} + +static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct amdgpu_fru_info *fru_info; + struct amdgpu_device *adev = smu->adev; + + if (!adev->fru_info) { + adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL); + if (!adev->fru_info) + return -ENOMEM; + } + + fru_info = adev->fru_info; + strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber, + sizeof(fru_info->product_number)); + strscpy(fru_info->product_name, static_metrics->ProductInfo.Name, + sizeof(fru_info->product_name)); + strscpy(fru_info->serial, static_metrics->ProductInfo.Serial, + sizeof(fru_info->serial)); + strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName, + sizeof(fru_info->manufacturer_name)); + strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId, + sizeof(fru_info->fru_id)); + + return 0; +} + +int smu_v13_0_12_get_max_metrics_size(void) +{ + return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); +} + +static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; + struct smu_table *table = &smu_table->driver_table; + int ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL); + if (ret) { + dev_info(smu->adev->dev, + 
"Failed to export static metrics table!\n"); + return ret; + } + + amdgpu_asic_invalidate_hdp(smu->adev, NULL); + memcpy(smu_table->metrics_table, table->cpu_addr, table_size); + + return 0; +} + +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + uint32_t table_version; + int ret, i; + + if (!pptable->Init) { + ret = smu_v13_0_12_get_static_metrics_table(smu); + if (ret) + return ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion, + &table_version); + if (ret) + return ret; + smu_table->tables[SMU_TABLE_SMU_METRICS].version = + table_version; + + pptable->MaxSocketPowerLimit = + SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit); + pptable->MaxGfxclkFrequency = + SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency); + pptable->MinGfxclkFrequency = + SMUQ10_ROUND(static_metrics->MinGfxclkFrequency); + + for (i = 0; i < 4; ++i) { + pptable->FclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->FclkFrequencyTable[i]); + pptable->UclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]); + pptable->SocclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->SocclkFrequencyTable[i]); + pptable->VclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->VclkFrequencyTable[i]); + pptable->DclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->DclkFrequencyTable[i]); + pptable->LclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->LclkFrequencyTable[i]); + } + + /* use AID0 serial number by default */ + pptable->PublicSerialNumber_AID = + static_metrics->PublicSerialNumber_AID[0]; + ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics); + if (ret) + return ret; + + pptable->Init = true; + } + + return 0; +} + +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu) +{ + int ret; + uint64_t feature_enabled; + + ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled); + + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0; + int xcc_id; + + /* For clocks with multiple instances, only report the first one */ + switch (member) { + case METRICS_CURR_GFXCLK: + case METRICS_AVERAGE_GFXCLK: + xcc_id = GET_INST(GC, 0); + *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + break; + case METRICS_CURR_SOCCLK: + case METRICS_AVERAGE_SOCCLK: + *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]); + break; + case METRICS_CURR_UCLK: + case METRICS_AVERAGE_UCLK: + *value = SMUQ10_ROUND(metrics->UclkFrequency); + break; + case METRICS_CURR_VCLK: + *value = SMUQ10_ROUND(metrics->VclkFrequency[0]); + break; + case METRICS_CURR_DCLK: + *value = SMUQ10_ROUND(metrics->DclkFrequency[0]); + break; + case METRICS_CURR_FCLK: + *value = SMUQ10_ROUND(metrics->FclkFrequency); + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = SMUQ10_ROUND(metrics->SocketGfxBusy); + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization); + break; + case METRICS_CURR_SOCKETPOWER: + *value = SMUQ10_ROUND(metrics->SocketPower) << 8; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = 
SMUQ10_ROUND(metrics->MaxSocketTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + /* This is the max of all VRs and not just SOC VR. + * No need to define another data type for the same. + */ + case METRICS_TEMPERATURE_VRSOC: + *value = SMUQ10_ROUND(metrics->MaxVrTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_7 *gpu_metrics = + (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table; + int ret = 0, xcc_id, inst, i, j, k, idx; + struct amdgpu_device *adev = smu->adev; + u8 num_jpeg_rings_gpu_metrics; + MetricsTable_t *metrics; + struct amdgpu_xcp *xcp; + u32 inst_mask; + + metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); + memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t)); + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7); + + gpu_metrics->temperature_hotspot = + SMUQ10_ROUND(metrics->MaxSocketTemperature); + /* Individual HBM stack temperature is not reported */ + gpu_metrics->temperature_mem = + SMUQ10_ROUND(metrics->MaxHbmTemperature); + /* Reports max temperature of all voltage rails */ + gpu_metrics->temperature_vrsoc = + SMUQ10_ROUND(metrics->MaxVrTemperature); + + gpu_metrics->average_gfx_activity = + SMUQ10_ROUND(metrics->SocketGfxBusy); + gpu_metrics->average_umc_activity = + SMUQ10_ROUND(metrics->DramBandwidthUtilization); + + gpu_metrics->mem_max_bandwidth = + SMUQ10_ROUND(metrics->MaxDramBandwidth); + + gpu_metrics->curr_socket_power = + SMUQ10_ROUND(metrics->SocketPower); + /* Energy counter reported in 15.259uJ (2^-16) units */ + gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc; + + for (i = 0; i < MAX_GFX_CLKS; i++) { + xcc_id = GET_INST(GC, i); + if (xcc_id >= 0) + gpu_metrics->current_gfxclk[i] = + SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + + if (i < MAX_CLKS) { + gpu_metrics->current_socclk[i] = + SMUQ10_ROUND(metrics->SocclkFrequency[i]); + inst = GET_INST(VCN, i); + if (inst >= 0) { + gpu_metrics->current_vclk0[i] = + SMUQ10_ROUND(metrics->VclkFrequency[inst]); + gpu_metrics->current_dclk0[i] = + SMUQ10_ROUND(metrics->DclkFrequency[inst]); + } + } + } + + gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency); + + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = metrics->AccumulationCounter; + + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc; + gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc; + gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc; + gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc; + gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc; + + /* Clock Lock Status. 
Each bit corresponds to each GFXCLK instance */ + gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0); + + gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth; + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(metrics->PCIeLinkSpeed); + gpu_metrics->pcie_bandwidth_acc = + SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]); + gpu_metrics->pcie_bandwidth_inst = + SMUQ10_ROUND(metrics->PcieBandwidth[0]); + gpu_metrics->pcie_l0_to_recov_count_acc = metrics->PCIeL0ToRecoveryCountAcc; + gpu_metrics->pcie_replay_count_acc = metrics->PCIenReplayAAcc; + gpu_metrics->pcie_replay_rover_count_acc = + metrics->PCIenReplayARolloverCountAcc; + gpu_metrics->pcie_nak_sent_count_acc = metrics->PCIeNAKSentCountAcc; + gpu_metrics->pcie_nak_rcvd_count_acc = metrics->PCIeNAKReceivedCountAcc; + gpu_metrics->pcie_lc_perf_other_end_recovery = metrics->PCIeOtherEndRecoveryAcc; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc); + gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); + + for (i = 0; i < NUM_XGMI_LINKS; i++) { + gpu_metrics->xgmi_read_data_acc[i] = + SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); + gpu_metrics->xgmi_write_data_acc[i] = + SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); + ret = amdgpu_get_xgmi_link_status(adev, i); + if (ret >= 0) + gpu_metrics->xgmi_link_status[i] = ret; + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics); + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * num_jpeg_rings_gpu_metrics) + j] = + SMUQ10_ROUND(metrics->JpegBusy + [(inst * NUM_JPEG_RINGS_FW) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + idx++; + } + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(metrics->GfxBusy[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + idx++; + } + } + + gpu_metrics->xgmi_link_width = metrics->XgmiWidth; + gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate; + + gpu_metrics->firmware_timestamp = metrics->Timestamp; + + *table = (void *)gpu_metrics; + kfree(metrics); + + return sizeof(*gpu_metrics); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index da7bd9227afe..682646068000 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -105,7 +105,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); enum smu_v13_0_6_caps { SMU_CAP(DPM), - SMU_CAP(UNI_METRICS), SMU_CAP(DPM_POLICY), SMU_CAP(OTHER_END_METRICS), SMU_CAP(SET_UCLK_MAX), @@ -117,6 +116,7 @@ enum smu_v13_0_6_caps { SMU_CAP(RMA_MSG), SMU_CAP(ACA_SYND), SMU_CAP(SDMA_RESET), + SMU_CAP(STATIC_METRICS), SMU_CAP(ALL), }; @@ -193,7 +193,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), 
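/* PPSMC message 0x44 was renamed from SelectPstatePolicy to
 * SetThrottlingPolicy in this patch (see the smu_v13_0_6_ppsmc.h hunk above);
 * the mapping below moves to the new name along with its caller,
 * smu_v13_0_6_select_policy_soc_pstate(). */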
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), - MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0), + MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), }; @@ -231,7 +231,11 @@ static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_CO SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), - SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), }; #define TABLE_PMSTATUSLOG 0 @@ -253,27 +257,13 @@ static const uint8_t smu_v13_0_6_throttler_map[] = { [THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT), }; -struct PPTable_t { - uint32_t MaxSocketPowerLimit; - uint32_t MaxGfxclkFrequency; - uint32_t MinGfxclkFrequency; - uint32_t FclkFrequencyTable[4]; - uint32_t UclkFrequencyTable[4]; - uint32_t SocclkFrequencyTable[4]; - uint32_t VclkFrequencyTable[4]; - uint32_t DclkFrequencyTable[4]; - uint32_t LclkFrequencyTable[4]; - uint32_t MaxLclkDpmRange; - uint32_t MinLclkDpmRange; - uint64_t PublicSerialNumber_AID; - bool Init; -}; - -#define SMUQ10_TO_UINT(x) ((x) >> 10) -#define SMUQ10_FRAC(x) ((x) & 0x3ff) -#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) -#define GET_METRIC_FIELD(field, flag) ((flag) ?\ - (metrics_a->field) : (metrics_x->field)) +#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\ + (metrics_v0->field) : (metrics_v2->field)) +#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\ + (metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version)) +#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\ + sizeof(MetricsTableV1_t),\ + sizeof(MetricsTableV2_t))) struct smu_v13_0_6_dpm_map { enum smu_clk_type clk_type; @@ -282,6 +272,18 @@ struct smu_v13_0_6_dpm_map { uint32_t *freq_table; }; +static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu) +{ + if ((smu->adev->flags & AMD_IS_APU) && + smu->smc_fw_version <= 0x4556900) + return METRICS_VERSION_V1; + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12)) + return METRICS_VERSION_V2; + + return METRICS_VERSION_V0; +} + static inline void smu_v13_0_6_cap_set(struct smu_context *smu, enum smu_v13_0_6_caps cap) { @@ -309,7 +311,6 @@ static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu, static void smu_v13_0_14_init_caps(struct smu_context *smu) { enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), - SMU_CAP(UNI_METRICS), SMU_CAP(SET_UCLK_MAX), SMU_CAP(DPM_POLICY), SMU_CAP(PCIE_METRICS), @@ -335,12 +336,14 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu) static void smu_v13_0_12_init_caps(struct smu_context *smu) { enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), - SMU_CAP(UNI_METRICS), SMU_CAP(PCIE_METRICS), SMU_CAP(CTF_LIMIT), SMU_CAP(MCA_DEBUG_MODE), SMU_CAP(RMA_MSG), - SMU_CAP(ACA_SYND) }; + SMU_CAP(ACA_SYND), + SMU_CAP(OTHER_END_METRICS), + SMU_CAP(HST_LIMIT_METRICS), + SMU_CAP(PER_INST_METRICS) }; uint32_t fw_ver = smu->smc_fw_version; for (int 
i = 0; i < ARRAY_SIZE(default_cap_list); i++) @@ -351,12 +354,14 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver >= 0x00561700) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + + if (fw_ver >= 0x00561E00) + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); } static void smu_v13_0_6_init_caps(struct smu_context *smu) { enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), - SMU_CAP(UNI_METRICS), SMU_CAP(SET_UCLK_MAX), SMU_CAP(DPM_POLICY), SMU_CAP(PCIE_METRICS), @@ -382,8 +387,6 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); - if (fw_ver <= 0x4556900) - smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS)); if (fw_ver >= 0x04556F00) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); if (fw_ver >= 0x04556A00) @@ -450,8 +453,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu) int var = (adev->pdev->device & 0xF); char ucode_prefix[15]; - /* No need to load P2S tables in IOV mode */ - if (amdgpu_sriov_vf(adev)) + /* No need to load P2S tables in IOV mode or for smu v13.0.12 */ + if (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))) return 0; if (!(adev->flags & AMD_IS_APU)) { @@ -508,13 +512,15 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; struct amdgpu_device *adev = smu->adev; + int gpu_metrcs_size = METRICS_TABLE_SIZE; if (!(adev->flags & AMD_IS_APU)) SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, - max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), + max(gpu_metrcs_size, + smu_v13_0_12_get_max_metrics_size()), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); @@ -522,8 +528,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); - smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t), - sizeof(MetricsTableA_t)), GFP_KERNEL); + smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; smu_table->metrics_time = 0; @@ -570,7 +575,7 @@ static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu, return -EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SelectPstatePolicy, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy, param, NULL); if (ret) @@ -740,10 +745,8 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header)); - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) - pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 6); - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14)) - pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 14); + pm_metrics->common_header.mp1_ip_discovery_version = + amdgpu_ip_version(smu->adev, MP1_HWIP, 0); pm_metrics->common_header.pmfw_version = pmfw_version; pm_metrics->common_header.pmmetrics_version = table_version; pm_metrics->common_header.structure_size = @@ -755,14 +758,18 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTableX_t *metrics_x 
= (MetricsTableX_t *)smu_table->metrics_table; - MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS)); + int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_setup_driver_pptable(smu); + /* Store one-time values in driver PPTable */ if (!pptable->Init) { while (--retry) { @@ -771,7 +778,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) return ret; /* Ensure that metrics have been updated */ - if (GET_METRIC_FIELD(AccumulationCounter, flag)) + if (GET_METRIC_FIELD(AccumulationCounter, version)) break; usleep_range(1000, 1100); @@ -788,29 +795,30 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) table_version; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]); + GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0]; + pptable->PublicSerialNumber_AID = + GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0]; pptable->Init = true; } @@ -1130,9 +1138,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, uint32_t *value) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; - MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; - bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS)); + MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; + int version = 
smu_v13_0_6_get_metrics_version(smu); struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -1141,56 +1150,59 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, if (ret) return ret; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_get_smu_metrics_data(smu, member, value); + /* For clocks with multiple instances, only report the first one */ switch (member) { case METRICS_CURR_GFXCLK: case METRICS_AVERAGE_GFXCLK: if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -1354,6 +1366,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX))) + return 0; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", pstate_table->uclk_pstate.curr.min, @@ -1929,7 +1944,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, break; } - return -EINVAL; + return -EOPNOTSUPP; } static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, @@ -2195,6 +2210,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) int ret; uint64_t feature_enabled; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) + return smu_v13_0_12_is_dpm_running(smu); + ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled); if (ret) @@ -2478,82 +2496,92 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table struct smu_table_context *smu_table = &smu->smu_table; struct gpu_metrics_v1_7 *gpu_metrics = (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table; - bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS)); + int version = smu_v13_0_6_get_metrics_version(smu); int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - MetricsTableX_t *metrics_x; - MetricsTableA_t *metrics_a; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; struct amdgpu_xcp *xcp; u16 link_width_level; + u8 num_jpeg_rings; u32 inst_mask; bool per_inst; - metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true); + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); + if (ret) { - kfree(metrics_x); + kfree(metrics_v0); return ret; } - metrics_a = (MetricsTableA_t *)metrics_x; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + kfree(metrics_v0); + return smu_v13_0_12_get_gpu_metrics(smu, table); + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version)); gpu_metrics->mem_max_bandwidth = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version)); gpu_metrics->curr_socket_power = -
SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag); + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, + version)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, + version)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); /* Total accumulated cycle counter */ - gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag); + gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version); /* Accumulated throttler residencies */ - gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag); - gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag); - gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag); - gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag); - gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag); + gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version); + gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version); + gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version); + gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version); + gpu_metrics->hbm_thm_residency_acc = + GET_METRIC_FIELD(HbmThmResidencyAcc, version); /* Clock Lock Status. 
Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, + version) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { /*Check smu version, PCIE link speed and width will be reported from pmfw metric @@ -2561,9 +2589,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table * for pf from registers */ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) { - gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth; + gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version); gpu_metrics->pcie_link_speed = - pcie_gen_to_speed(metrics_x->PCIeLinkSpeed); + pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version)); } else if (!amdgpu_sriov_vf(adev)) { link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); if (link_width_level > MAX_LINK_WIDTH) @@ -2576,37 +2604,37 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table } gpu_metrics->pcie_bandwidth_acc = - SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]); gpu_metrics->pcie_bandwidth_inst = - SMUQ10_ROUND(metrics_x->PcieBandwidth[0]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]); gpu_metrics->pcie_l0_to_recov_count_acc = - metrics_x->PCIeL0ToRecoveryCountAcc; + GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version); gpu_metrics->pcie_replay_count_acc = - metrics_x->PCIenReplayAAcc; + GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version); gpu_metrics->pcie_replay_rover_count_acc = - metrics_x->PCIenReplayARolloverCountAcc; + GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version); gpu_metrics->pcie_nak_sent_count_acc = - metrics_x->PCIeNAKSentCountAcc; + GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version); gpu_metrics->pcie_nak_rcvd_count_acc = - metrics_x->PCIeNAKReceivedCountAcc; + GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version); if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS))) gpu_metrics->pcie_lc_perf_other_end_recovery = - metrics_x->PCIeOtherEndRecoveryAcc; + GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version); } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version)); for (i = 0; i < NUM_XGMI_LINKS; i++) { gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); ret = amdgpu_get_xgmi_link_status(adev, i); if (ret >= 0) gpu_metrics->xgmi_link_status[i] = ret; @@ -2616,6 +2644,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; for_each_xcp(adev->xcp_mgr, xcp, i) { amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); idx = 0; @@ -2623,14 +2652,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table 
/* Both JPEG and VCN has same instances */ inst = GET_INST(VCN, k); - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + for (j = 0; j < num_jpeg_rings; ++j) { gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * adev->jpeg.num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag) - [(inst * adev->jpeg.num_jpeg_rings) + j]); + [(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version) + [(inst * num_jpeg_rings) + j]); } gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); idx++; } @@ -2641,27 +2670,29 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table for_each_inst(k, inst_mask) { inst = GET_INST(GC, k); gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(metrics_x->GfxBusy[inst]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); if (smu_v13_0_6_cap_supported( smu, SMU_CAP(HST_LIMIT_METRICS))) gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] = - SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc + SMUQ10_ROUND(GET_GPU_METRIC_FIELD + (GfxclkBelowHostLimitAcc, version) [inst]); idx++; } } } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag)); - gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag)); + gpu_metrics->xgmi_link_width = GET_METRIC_FIELD(XgmiWidth, version); + gpu_metrics->xgmi_link_speed = GET_METRIC_FIELD(XgmiBitrate, version); - gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag); + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); *table = (void *)gpu_metrics; - kfree(metrics_x); + kfree(metrics_v0); return sizeof(*gpu_metrics); } @@ -2871,11 +2902,31 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu) return ret; } +/** + * smu_v13_0_6_reset_sdma_is_supported - Check if SDMA reset is supported + * @smu: smu_context pointer + * + * This function checks if the SMU supports resetting the SDMA engine. + * It returns false if the capability is not supported. + */ +static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu) +{ + bool ret = true; + + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) { + dev_info(smu->adev->dev, + "SDMA reset capability is not supported\n"); + ret = false; + } + + return ret; +} + static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask) { int ret = 0; - if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) + if (!smu_v13_0_6_reset_sdma_is_supported(smu)) return -EOPNOTSUPP; ret = smu_cmn_send_smc_msg_with_param(smu, @@ -3559,14 +3610,17 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, .send_rma_reason = smu_v13_0_6_send_rma_reason, .reset_sdma = smu_v13_0_6_reset_sdma, + .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) { smu->ppt_funcs = &smu_v13_0_6_ppt_funcs; - smu->message_map = smu_v13_0_6_message_map; + smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? 
+ smu_v13_0_12_message_map : smu_v13_0_6_message_map; smu->clock_map = smu_v13_0_6_clk_map; - smu->feature_map = smu_v13_0_6_feature_mask_map; + smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? + smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index f0fa42a645c0..83745909e564 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -27,6 +27,30 @@ #define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4 #define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2 +typedef enum { +/*0*/ METRICS_VERSION_V0 = 0, +/*1*/ METRICS_VERSION_V1 = 1, +/*2*/ METRICS_VERSION_V2 = 2, + +/*3*/ NUM_METRICS = 3 +} METRICS_LIST_e; + +struct PPTable_t { + uint32_t MaxSocketPowerLimit; + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + uint64_t PublicSerialNumber_AID; + bool Init; +}; + extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 55ef18517b0f..c8f4f6fb4083 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -807,6 +807,10 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu, else *value = metrics->AverageMemclkFrequencyPreDs; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->Vcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_VCLK: *value = metrics->AverageVclk0Frequency; break; @@ -951,6 +955,12 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_7_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_7_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -2811,5 +2821,4 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) smu->workload_map = smu_v13_0_7_workload_map; smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index ddb6444406d2..76c1adda83db 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -245,6 +245,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 4): + case IP_VERSION(14, 0, 5): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): @@ -769,6 +770,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 3): case IP_VERSION(14, 0, 4): + case IP_VERSION(14, 0, 5): if 
(!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -948,6 +950,14 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev, uint32_t client_id = entry->client_id; uint32_t src_id = entry->src_id; + /* + * ctxid is used to distinguish different + * events for SMCToHost interrupt. + */ + uint32_t ctxid = entry->src_data[0]; + uint32_t data; + uint32_t high; + if (client_id == SOC15_IH_CLIENTID_THM) { switch (src_id) { case THM_11_0__SRCID__THM_DIG_THERM_L2H: @@ -962,6 +972,50 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev, src_id); break; } + } else if (client_id == SOC15_IH_CLIENTID_MP1) { + if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) { + /* ACK SMUToHost interrupt */ + data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); + + switch (ctxid) { + case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL: + high = smu->thermal_range.software_shutdown_temp + + smu->thermal_range.software_shutdown_temp_offset; + high = min_t(typeof(high), + SMU_THERMAL_MAXIMUM_ALERT_TEMP, + high); + dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n", + high, + smu->thermal_range.software_shutdown_temp_offset); + + data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL, + DIG_THERM_INTH, + (high & 0xff)); + data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); + break; + case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY: + high = min_t(typeof(high), + SMU_THERMAL_MAXIMUM_ALERT_TEMP, + smu->thermal_range.software_shutdown_temp); + dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high); + + data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL, + DIG_THERM_INTH, + (high & 0xff)); + data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); + break; + default: + dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n", + ctxid, client_id); + break; + } + } } return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 5cad09c5f2ff..f7cfe1f35cae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -756,6 +756,10 @@ static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_MEMACTIVITY: *value = metrics->AverageUclkActivity; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->AverageVcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_SOCKETPOWER: *value = metrics->AverageSocketPower << 8; break; @@ -882,6 +886,12 @@ static int smu_v14_0_2_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v14_0_2_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -1193,16 +1203,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_BIT)) break; - PPTable_t *pptable = smu->smu_table.driver_pptable; - const OverDriveLimits_t * const overdrive_upperlimits = - &pptable->SkuTable.OverDriveLimitsBasicMax; - const OverDriveLimits_t * const 
overdrive_lowerlimits = - &pptable->SkuTable.OverDriveLimitsBasicMin; - size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n"); - size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n", - overdrive_lowerlimits->GfxclkFoffset, - overdrive_upperlimits->GfxclkFoffset); + size += sysfs_emit_at(buf, size, "%dMhz\n", + od_table->OverDriveTable.GfxclkFoffset); break; case SMU_OD_MCLK: @@ -1337,12 +1340,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMIN, - &min_value, - NULL); - smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_GFXCLK_FMAX, - NULL, + &min_value, &max_value); size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n", min_value, max_value); @@ -1627,6 +1626,39 @@ out: adev->unique_id = ((uint64_t)upper32 << 32) | lower32; } +static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + if (!speed) + return -EINVAL; + + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_FANPWM, + speed); + if (ret) { + dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); + return ret; + } + + /* Convert the PMFW output which is in percent to pwm(255) based */ + *speed = min(*speed * 255 / 100, (uint32_t)255); + + return 0; +} + +static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + if (!speed) + return -EINVAL; + + return smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_FANSPEED, + speed); +} + static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *current_power_limit, uint32_t *default_power_limit, @@ -2417,36 +2449,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, return -ENOTSUPP; } - for (i = 0; i < size; i += 2) { - if (i + 2 > size) { - dev_info(adev->dev, "invalid number of input parameters %d\n", size); - return -EINVAL; - } - - switch (input[i]) { - case 1: - smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMAX, - &minimum, - &maximum); - if (input[i + 1] < minimum || - input[i + 1] > maximum) { - dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", - input[i + 1], minimum, maximum); - return -EINVAL; - } - - od_table->OverDriveTable.GfxclkFoffset = input[i + 1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; - break; + if (size != 1) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } - default: - dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); - dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); - return -EINVAL; - } + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n", + minimum, maximum); + return -EINVAL; } + od_table->OverDriveTable.GfxclkFoffset = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; break; case PP_OD_EDIT_MCLK_VDDC_TABLE: @@ -2804,6 +2824,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .set_performance_level = smu_v14_0_set_performance_level, .gfx_off_control = smu_v14_0_gfx_off_control, .get_unique_id = smu_v14_0_2_get_unique_id, + .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm, + .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm, .get_power_limit = smu_v14_0_2_get_power_limit, .set_power_limit = smu_v14_0_2_set_power_limit, 
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 9f55207ea9bc..d834d134ad2b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -459,8 +459,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, } if (read_arg) { smu_cmn_read_arg(smu, read_arg); - dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\ readval: 0x%08x\n", + dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n", smu_get_message_name(smu, msg), index, param, reg, *read_arg); } else { dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
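
Note on the metrics rework that dominates the smu_v13_0_6_ppt.c hunks above: the old boolean "flag" selector between two table layouts (MetricsTableX_t/MetricsTableA_t) is replaced by an integer version (MetricsTableV0_t/V1_t/V2_t, see METRICS_LIST_e) resolved once via smu_v13_0_6_get_metrics_version(), so every GET_METRIC_FIELD() call site stays unchanged when a new layout is added. The following is a minimal, self-contained sketch of that dispatch pattern only; the macro body, the struct members, and the names MetricsV0/V1/V2 here are illustrative assumptions, not the kernel's actual definitions (those live in the SMU 13.0.6/13.0.12 headers, which this diff does not show).

#include <stdint.h>
#include <stdio.h>

enum { METRICS_VERSION_V0, METRICS_VERSION_V1, METRICS_VERSION_V2 };

/* Placeholder stand-ins for MetricsTableV0_t/V1_t/V2_t: logically the
 * same fields, but each firmware generation may lay them out
 * differently, which is why the driver keeps separately typed views. */
typedef struct { uint64_t AccumulationCounter; uint32_t SocketPower; } MetricsV0;
typedef struct { uint64_t AccumulationCounter; uint32_t SocketPower; } MetricsV1;
typedef struct { uint64_t AccumulationCounter; uint32_t SocketPower; } MetricsV2;

/* Assumed shape of the version-keyed accessor, not the kernel macro:
 * pick the struct view matching the firmware's table version. */
#define GET_METRIC_FIELD(field, version)                         \
	((version) == METRICS_VERSION_V2 ? metrics_v2->field :   \
	 (version) == METRICS_VERSION_V1 ? metrics_v1->field :   \
	 metrics_v0->field)

int main(void)
{
	/* One buffer sized for the largest layout (cf. METRICS_TABLE_SIZE);
	 * all typed views alias it, as in the driver. */
	static union {
		MetricsV0 v0;
		MetricsV1 v1;
		MetricsV2 v2;
	} buf;
	MetricsV0 *metrics_v0 = &buf.v0;
	MetricsV1 *metrics_v1 = &buf.v1;
	MetricsV2 *metrics_v2 = &buf.v2;
	/* The driver derives this once from firmware capability checks,
	 * e.g. SMU_CAP(STATIC_METRICS) routes to the v13.0.12 path. */
	int version = METRICS_VERSION_V1;

	metrics_v1->SocketPower = 180;
	printf("socket power: %u\n",
	       (unsigned int)GET_METRIC_FIELD(SocketPower, version));
	return 0;
}

Resolving the version once per metrics fetch keeps the dozens of per-field reads branch-light, and the v0 pointer doubles as the allocation handle, which is why the free paths above operate on metrics_v0.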