Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  670
1 file changed, 263 insertions(+), 407 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..922def51685b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -98,6 +98,85 @@ const char * const amdgpu_pp_profile_name[] = {
};
/**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ * @runpm: Take runpm status into account for the suspend state check.
+ *
+ * Checks the state of the @adev for access. Return 0 if the device is
+ * accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
+{
+ bool runpm_check = runpm ? adev->in_runpm : false;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !runpm_check)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to resume if
+ * needed. Return 0 if the device is accessible or a negative error code
+ * otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to determine
+ * if the device is active. Allow access only if the device is active.
+ * Return 0 if the device is accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+ int ret;
+
+ /* Ignore runpm status. If device is in suspended state, deny access */
+ ret = amdgpu_pm_dev_state_check(adev, false);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow only if the device is active. Also allow access if runtime PM
+ * is disabled, as in kernels without CONFIG_PM.
+ */
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (!ret)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put the device into autosuspend mode after an access.
+ * @adev: Target device.
+ *
+ * Should be paired with amdgpu_pm_get_access*() calls.
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+}
+
+/**
* DOC: power_dpm_state
*
* The power_dpm_state file is a legacy interface and is only provided for
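
For reference, every conversion in the hunks that follow has the same shape: read-only sysfs handlers take amdgpu_pm_get_access_if_active() so they never wake a runtime-suspended device, writable handlers take amdgpu_pm_get_access() so the device is resumed if necessary, and both paths drop the reference with amdgpu_pm_put_access(). The sketch below is illustrative only: example_show(), example_store() and the amdgpu_dpm_*_example_value() callees are hypothetical placeholders, not part of this patch; only the three access helpers come from the hunk above.

/*
 * Illustrative sketch of the access pattern used throughout this patch.
 * Assumes the usual amdgpu driver context (amdgpu.h, amdgpu_dpm.h,
 * linux/pm_runtime.h); the handler names and amdgpu_dpm_*_example_value()
 * helpers are placeholders.
 */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	u32 value = 0;
	int ret;

	/* Read path: proceed only if the device is already active. */
	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_example_value(adev);	/* placeholder */

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	u32 value;
	int ret;

	ret = kstrtou32(buf, 10, &value);
	if (ret)
		return ret;

	/* Write path: resume the device through runtime PM if needed. */
	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_example_value(adev, value);	/* placeholder */

	amdgpu_pm_put_access(adev);

	return ret ? ret : count;
}
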
@@ -140,18 +219,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -168,11 +242,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -182,14 +251,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_power_state(adev, state);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -263,18 +331,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
@@ -299,11 +362,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -328,14 +386,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
@@ -343,8 +400,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -359,19 +415,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
uint32_t i;
int buf_len, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
@@ -394,20 +445,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -430,11 +476,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -453,11 +494,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
@@ -469,7 +505,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -490,14 +526,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
adev->pm.pp_force_state_enabled = true;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return ret;
}
@@ -521,18 +556,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (size <= 0)
return size;
@@ -554,19 +584,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -735,11 +759,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (count > 127 || count == 0)
return -EINVAL;
@@ -785,7 +804,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -806,14 +825,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
goto err_out;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return -EINVAL;
}
@@ -835,14 +853,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
};
uint clk_index;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -861,7 +874,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -892,23 +905,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -925,20 +932,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -991,14 +993,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
int size = 0;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1007,7 +1004,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1057,23 +1054,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -1240,18 +1231,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1266,24 +1252,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1297,18 +1277,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1323,24 +1298,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1378,20 +1347,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1414,11 +1378,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1445,14 +1404,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (!ret)
return count;
@@ -1466,19 +1424,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
{
int r, size = sizeof(uint32_t);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -1576,24 +1529,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
@@ -1616,11 +1564,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
@@ -1715,9 +1658,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1725,7 +1668,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1749,20 +1692,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1786,18 +1727,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1824,14 +1760,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1843,7 +1774,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1939,19 +1870,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
int r = 0;
int bias = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(ddev->dev);
- if (r < 0)
- return r;
-
r = kstrtoint(buf, 10, &bias);
if (r)
goto out;
+ r = amdgpu_pm_get_access(adev);
+ if (r < 0)
+ return r;
+
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
@@ -1963,8 +1889,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
/* TODO: update bias level with SMU message */
out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return r;
}
@@ -2006,10 +1932,11 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
return 0;
}
- /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+ /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
+ if (amdgpu_sriov_multi_vf_mode(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
@@ -2044,7 +1971,7 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
* setting should not be allowed from VF if not in one VF mode.
*/
if (gc_ver >= IP_VERSION(10, 0, 0) ||
- (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+ (amdgpu_sriov_multi_vf_mode(adev))) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
@@ -2087,7 +2014,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2109,7 +2037,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2120,7 +2049,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
if (gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2218,11 +2148,6 @@ static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
}
@@ -2239,11 +2164,6 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
char *tmp, *param;
long val;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
count = min(count, sizeof(tmp_buf));
memcpy(tmp_buf, buf, count);
tmp_buf[count - 1] = '\0';
@@ -2269,14 +2189,13 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -2395,13 +2314,22 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 3) ||
- gc_ver == IP_VERSION(10, 3, 6) ||
- gc_ver == IP_VERSION(10, 3, 7) ||
- gc_ver == IP_VERSION(11, 0, 1) ||
- gc_ver == IP_VERSION(11, 0, 4) ||
- gc_ver == IP_VERSION(11, 5, 0)))
+ if (!(gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 5, 1) ||
+ gc_ver == IP_VERSION(11, 5, 2) ||
+ gc_ver == IP_VERSION(11, 5, 3) ||
+ gc_ver == IP_VERSION(12, 0, 0) ||
+ gc_ver == IP_VERSION(12, 0, 1)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
@@ -2416,11 +2344,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2699,18 +2630,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2728,11 +2654,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
u32 pwm_mode;
int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2746,14 +2667,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2784,16 +2704,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2810,8 +2725,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2827,18 +2741,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2854,18 +2763,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2915,18 +2819,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2943,16 +2842,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2968,8 +2862,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2985,18 +2878,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -3014,11 +2902,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -3030,14 +2913,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return -EINVAL;
@@ -3152,14 +3034,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
ssize_t size;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3169,7 +3046,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3230,11 +3107,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -3245,14 +3117,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_power_limit(adev, value);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -3530,7 +3401,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
- (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
+ (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3605,7 +3477,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
@@ -3613,7 +3486,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only APUs other than gc 9,4,3 have vddnb */
if ((!(adev->flags & AMD_IS_APU) ||
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
@@ -3636,7 +3510,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* hotspot temperature for gc 9,4,3*/
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
@@ -3686,20 +3561,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
int size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3767,11 +3637,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
long parameter[64];
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = parse_input_od_command_lines(in_buf,
count,
&cmd_type,
@@ -3780,7 +3645,7 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -3799,14 +3664,12 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
goto err_out;
}
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return ret;
}
@@ -4776,16 +4639,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(dev->dev);
+ r = amdgpu_pm_get_access(adev);
if (r < 0)
return r;
@@ -4802,7 +4659,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_put_autosuspend(dev->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -4822,10 +4679,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
void *smu_prv_buf;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)