author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 20:28:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 20:28:08 -0700
commit     04084978003c1a1810a0b1fea581078106394a32 (patch)
tree       03844f5718e1200ac7b2722edfe44e5c8b15efa4 /include
parent     d516840629ccc1aa6b59a0886134688f9b5531a0 (diff)
parent     86ba54fb0816480941cda78a99f107ab2bbd4249 (diff)
Merge tag 'pm-5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "The most significant change here is the extension of the Energy Model to cover non-CPU devices (as well as CPUs), from Lukasz Luba.

  There is also some new hardware support (Ice Lake server idle states table for intel_idle, Sapphire Rapids and Power Limit 4 support in the RAPL driver), some new functionality in the existing drivers (e.g. a new switch to disable/enable CPU energy-efficiency optimizations in intel_pstate, delayed timers in devfreq), some assorted fixes (cpufreq core, intel_pstate, intel_idle) and cleanups (e.g. cpuidle-psci, devfreq), including the elimination of W=1 build warnings from cpufreq done by Lee Jones.

  Specifics:

   - Make the Energy Model cover non-CPU devices (Lukasz Luba).

   - Add Ice Lake server idle states table to the intel_idle driver and eliminate a redundant static variable from it (Chen Yu, Rafael Wysocki).

   - Eliminate all W=1 build warnings from cpufreq (Lee Jones).

   - Add support for Sapphire Rapids and for Power Limit 4 to the Intel RAPL power capping driver (Sumeet Pawnikar, Zhang Rui).

   - Fix function name in kerneldoc comments in the idle_inject power capping driver (Yangtao Li).

   - Fix locking issues with cpufreq governors and drop a redundant "weak" function definition from cpufreq (Viresh Kumar).

   - Rearrange cpufreq to register non-modular governors at the core_initcall level and allow the default cpufreq governor to be specified on the kernel command line (Quentin Perret).

   - Extend, fix and clean up the intel_pstate driver (Srinivas Pandruvada, Rafael Wysocki):
      * Add a new sysfs attribute for disabling/enabling CPU energy-efficiency optimizations in the processor.
      * Make the driver avoid enabling HWP if EPP is not supported.
      * Allow the driver to handle numeric EPP values in the sysfs interface and fix the setting of EPP via sysfs in the active mode.
      * Eliminate a static checker warning and clean up a kerneldoc comment.

   - Clean up some variable declarations in the powernv cpufreq driver (Wei Yongjun).

   - Fix up the ->enter_s2idle callback definition to correctly cover the case when it points to the same function as ->enter (Neal Liu).

   - Rearrange and clean up the PSCI cpuidle driver (Ulf Hansson).

   - Make the PM core emit "changed" uevent when adding/removing the "wakeup" sysfs attribute of devices (Abhishek Pandit-Subedi).

   - Add a helper macro for declaring PM callbacks and use it in the MMC jz4740 driver (Paul Cercueil).

   - Fix white space in some places in the hibernate code and make the system-wide PM code use "const char *" where appropriate (Xiang Chen, Alexey Dobriyan).

   - Add one more "unsafe" helper macro to the freezer to cover the NFS use case (He Zhe).

   - Change the language in the generic PM domains framework to use parent/child terminology and clean up a typo and some comment formatting in that code (Kees Cook, Geert Uytterhoeven).

   - Update the operating performance points (OPP) framework (Lukasz Luba, Andrew-sh.Cheng, Valdis Kletnieks):
      * Refactor dev_pm_opp_of_register_em() and update related drivers.
      * Add a missing function export.
      * Allow disabled OPPs in dev_pm_opp_get_freq().

   - Update devfreq core and drivers (Chanwoo Choi, Lukasz Luba, Enric Balletbo i Serra, Dmitry Osipenko, Kieran Bingham, Marc Zyngier):
      * Add support for delayed timers to the devfreq core and make the Samsung exynos5422-dmc driver use it.
      * Unify the sysfs interface to use "df-" as a prefix in instance names consistently.
      * Fix devfreq_summary debugfs node indentation.
      * Add the rockchip,pmu phandle to the rk3399_dmc driver DT bindings.
      * List Dmitry Osipenko as the Tegra devfreq driver maintainer.
      * Fix typos in the core devfreq code.

   - Update the pm-graph utility to version 5.7, including a number of fixes related to suspend-to-idle (Todd Brandt).

   - Fix coccicheck errors and warnings in the cpupower utility (Shuah Khan).

   - Replace HTTP links with HTTPS ones in multiple places (Alexander A. Klimov)"

* tag 'pm-5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (71 commits)
  cpuidle: ACPI: fix 'return' with no value build warning
  cpufreq: intel_pstate: Fix EPP setting via sysfs in active mode
  cpufreq: intel_pstate: Rearrange the storing of new EPP values
  intel_idle: Customize IceLake server support
  PM / devfreq: Fix the wrong end with semicolon
  PM / devfreq: Fix indentaion of devfreq_summary debugfs node
  PM / devfreq: Clean up the devfreq instance name in sysfs attr
  memory: samsung: exynos5422-dmc: Add module param to control IRQ mode
  memory: samsung: exynos5422-dmc: Adjust polling interval and uptreshold
  memory: samsung: exynos5422-dmc: Use delayed timer as default
  PM / devfreq: Add support delayed timer for polling mode
  dt-bindings: devfreq: rk3399_dmc: Add rockchip,pmu phandle
  PM / devfreq: tegra: Add Dmitry as a maintainer
  PM / devfreq: event: Fix trivial spelling
  PM / devfreq: rk3399_dmc: Fix kernel oops when rockchip,pmu is absent
  cpuidle: change enter_s2idle() prototype
  cpuidle: psci: Prevent domain idlestates until consumers are ready
  cpuidle: psci: Convert PM domain to platform driver
  cpuidle: psci: Fix error path via converting to a platform driver
  cpuidle: psci: Fail cpuidle registration if set OSI mode failed
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/cpufreq.h        14
-rw-r--r--  include/linux/cpuidle.h         9
-rw-r--r--  include/linux/devfreq.h         9
-rw-r--r--  include/linux/device.h          5
-rw-r--r--  include/linux/energy_model.h  149
-rw-r--r--  include/linux/freezer.h        14
-rw-r--r--  include/linux/intel_rapl.h      5
-rw-r--r--  include/linux/pm.h             10
-rw-r--r--  include/linux/pm_domain.h      12
-rw-r--r--  include/linux/pm_opp.h         15
10 files changed, 168 insertions(+), 74 deletions(-)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 3494f6763597..e62b022cb07e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -577,6 +577,20 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+#define cpufreq_governor_init(__governor) \
+static int __init __governor##_init(void) \
+{ \
+ return cpufreq_register_governor(&__governor); \
+} \
+core_initcall(__governor##_init)
+
+#define cpufreq_governor_exit(__governor) \
+static void __exit __governor##_exit(void) \
+{ \
+ return cpufreq_unregister_governor(&__governor); \
+} \
+module_exit(__governor##_exit)
+
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
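
For context, a governor built on top of the new macros would look roughly like the sketch below (assuming <linux/cpufreq.h>; the governor name and its empty callback set are hypothetical, only cpufreq_governor_init()/cpufreq_governor_exit() come from this hunk). Built-in governors now register at core_initcall() time, which is what allows the default governor to be chosen on the kernel command line (the cpufreq.default_governor= parameter added by this series).

/* Minimal sketch of a governor using the new registration macros. */
static struct cpufreq_governor example_governor = {
	.name	= "example",		/* hypothetical governor name */
	.owner	= THIS_MODULE,
};

/* Registered at core_initcall() level when built in. */
cpufreq_governor_init(example_governor);
/* Unregistered on module unload when built as a module. */
cpufreq_governor_exit(example_governor);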
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ec2ef63771f0..b65909ae4e20 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -65,10 +65,13 @@ struct cpuidle_state {
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
+ *
+ * This callback may point to the same function as ->enter if all of
+ * the above requirements are met by it.
*/
- void (*enter_s2idle) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
+ int (*enter_s2idle)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
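
As the updated kerneldoc notes, a driver whose regular idle handler already satisfies the suspend-to-idle constraints can now share it between both callbacks, since they use the same int-returning prototype. A minimal sketch (the state parameters and names are made up; only the callback signature comes from the hunk above):

/* Sketch: one handler serving both ->enter and ->enter_s2idle. */
static int example_idle_enter(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* Enter the low-power state without re-enabling interrupts or
	 * touching clock event devices, then report the state entered. */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name		= "example_idle",
	.states = {
		{
			.name			= "STATE1",
			.desc			= "hypothetical idle state",
			.exit_latency		= 2,
			.target_residency	= 4,
			.enter			= example_idle_enter,
			.enter_s2idle		= example_idle_enter,
		},
	},
	.state_count	= 1,
};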
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 57e871a559a9..12782fbb4c25 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -31,6 +31,13 @@
#define DEVFREQ_PRECHANGE (0)
#define DEVFREQ_POSTCHANGE (1)
+/* DEVFREQ work timers */
+enum devfreq_timer {
+ DEVFREQ_TIMER_DEFERRABLE = 0,
+ DEVFREQ_TIMER_DELAYED,
+ DEVFREQ_TIMER_NUM,
+};
+
struct devfreq;
struct devfreq_governor;
@@ -70,6 +77,7 @@ struct devfreq_dev_status {
* @initial_freq: The operating frequency when devfreq_add_device() is
* called.
* @polling_ms: The polling interval in ms. 0 disables polling.
+ * @timer: Timer type is either deferrable or delayed timer.
* @target: The device should set its operating frequency at
* freq or lowest-upper-than-freq value. If freq is
* higher than any operable frequency, set maximum.
@@ -96,6 +104,7 @@ struct devfreq_dev_status {
struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
+ enum devfreq_timer timer;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
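
To illustrate the new field, a devfreq driver opting in to the delayed timer would extend its profile along these lines (a sketch; the frequencies and the target callback are hypothetical, only the timer field and DEVFREQ_TIMER_DELAYED come from this hunk):

static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Program the hardware to the requested frequency here. */
	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 400000000,			/* Hz */
	.polling_ms	= 100,
	.timer		= DEVFREQ_TIMER_DELAYED,	/* new in this series */
	.target		= example_target,
};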
diff --git a/include/linux/device.h b/include/linux/device.h
index 5efed864b387..4e2e9d3a2eda 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -13,6 +13,7 @@
#define _DEVICE_H_
#include <linux/dev_printk.h>
+#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
@@ -560,6 +561,10 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_ENERGY_MODEL
+ struct em_perf_domain *em_pd;
+#endif
+
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct irq_domain *msi_domain;
#endif
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index ade6486a3382..b67a51c574b9 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
+#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/rcupdate.h>
@@ -10,13 +11,15 @@
#include <linux/types.h>
/**
- * em_cap_state - Capacity state of a performance domain
- * @frequency: The CPU frequency in KHz, for consistency with CPUFreq
- * @power: The power consumed by 1 CPU at this level, in milli-watts
+ * em_perf_state - Performance state of a performance domain
+ * @frequency: The frequency in KHz, for consistency with CPUFreq
+ * @power: The power consumed at this level, in milli-watts (by 1 CPU or
+ by a registered device). It can be a total power: static and
+ dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
*/
-struct em_cap_state {
+struct em_perf_state {
unsigned long frequency;
unsigned long power;
unsigned long cost;
@@ -24,102 +27,119 @@ struct em_cap_state {
/**
* em_perf_domain - Performance domain
- * @table: List of capacity states, in ascending order
- * @nr_cap_states: Number of capacity states
- * @cpus: Cpumask covering the CPUs of the domain
+ * @table: List of performance states, in ascending order
+ * @nr_perf_states: Number of performance states
+ * @cpus: Cpumask covering the CPUs of the domain. It's here
+ * for performance reasons to avoid potential cache
+ * misses during energy calculations in the scheduler
+ * and simplifies allocating/freeing that memory region.
*
- * A "performance domain" represents a group of CPUs whose performance is
- * scaled together. All CPUs of a performance domain must have the same
- * micro-architecture. Performance domains often have a 1-to-1 mapping with
- * CPUFreq policies.
+ * In case of CPU device, a "performance domain" represents a group of CPUs
+ * whose performance is scaled together. All CPUs of a performance domain
+ * must have the same micro-architecture. Performance domains often have
+ * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus
+ * field is unused.
*/
struct em_perf_domain {
- struct em_cap_state *table;
- int nr_cap_states;
+ struct em_perf_state *table;
+ int nr_perf_states;
unsigned long cpus[];
};
+#define em_span_cpus(em) (to_cpumask((em)->cpus))
+
#ifdef CONFIG_ENERGY_MODEL
-#define EM_CPU_MAX_POWER 0xFFFF
+#define EM_MAX_POWER 0xFFFF
struct em_data_callback {
/**
- * active_power() - Provide power at the next capacity state of a CPU
- * @power : Active power at the capacity state in mW (modified)
- * @freq : Frequency at the capacity state in kHz (modified)
- * @cpu : CPU for which we do this operation
+ * active_power() - Provide power at the next performance state of
+ * a device
+ * @power : Active power at the performance state in mW
+ * (modified)
+ * @freq : Frequency at the performance state in kHz
+ * (modified)
+ * @dev : Device for which we do this operation (can be a CPU)
*
- * active_power() must find the lowest capacity state of 'cpu' above
+ * active_power() must find the lowest performance state of 'dev' above
* 'freq' and update 'power' and 'freq' to the matching active power
* and frequency.
*
- * The power is the one of a single CPU in the domain, expressed in
- * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
- * range.
+ * In case of CPUs, the power is the one of a single CPU in the domain,
+ * expressed in milli-watts. It is expected to fit in the
+ * [0, EM_MAX_POWER] range.
*
* Return 0 on success.
*/
- int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+ int (*active_power)(unsigned long *power, unsigned long *freq,
+ struct device *dev);
};
#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
struct em_perf_domain *em_cpu_get(int cpu);
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
- struct em_data_callback *cb);
+struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span);
+void em_dev_unregister_perf_domain(struct device *dev);
/**
- * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
+ performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
*
+ * This function must be used only for CPU devices. There is no validation,
+ * i.e. if the EM is a CPU type and has cpumask allocated. It is called from
+ * the scheduler code quite frequently and that is why there is not checks.
+ *
* Return: the sum of the energy consumed by the CPUs of the domain assuming
* a capacity state satisfying the max utilization of the domain.
*/
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util)
{
unsigned long freq, scale_cpu;
- struct em_cap_state *cs;
+ struct em_perf_state *ps;
int i, cpu;
/*
- * In order to predict the capacity state, map the utilization of the
- * most utilized CPU of the performance domain to a requested frequency,
- * like schedutil.
+ * In order to predict the performance state, map the utilization of
+ * the most utilized CPU of the performance domain to a requested
+ * frequency, like schedutil.
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- cs = &pd->table[pd->nr_cap_states - 1];
- freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+ ps = &pd->table[pd->nr_perf_states - 1];
+ freq = map_util_freq(max_util, ps->frequency, scale_cpu);
/*
- * Find the lowest capacity state of the Energy Model above the
+ * Find the lowest performance state of the Energy Model above the
* requested frequency.
*/
- for (i = 0; i < pd->nr_cap_states; i++) {
- cs = &pd->table[i];
- if (cs->frequency >= freq)
+ for (i = 0; i < pd->nr_perf_states; i++) {
+ ps = &pd->table[i];
+ if (ps->frequency >= freq)
break;
}
/*
- * The capacity of a CPU in the domain at that capacity state (cs)
+ * The capacity of a CPU in the domain at the performance state (ps)
* can be computed as:
*
- * cs->freq * scale_cpu
- * cs->cap = -------------------- (1)
+ * ps->freq * scale_cpu
+ * ps->cap = -------------------- (1)
* cpu_max_freq
*
* So, ignoring the costs of idle states (which are not available in
- * the EM), the energy consumed by this CPU at that capacity state is
- * estimated as:
+ * the EM), the energy consumed by this CPU at that performance state
+ * is estimated as:
*
- * cs->power * cpu_util
+ * ps->power * cpu_util
* cpu_nrg = -------------------- (2)
- * cs->cap
+ * ps->cap
*
- * since 'cpu_util / cs->cap' represents its percentage of busy time.
+ * since 'cpu_util / ps->cap' represents its percentage of busy time.
*
* NOTE: Although the result of this computation actually is in
* units of power, it can be manipulated as an energy value
@@ -129,55 +149,64 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
* of two terms:
*
- * cs->power * cpu_max_freq cpu_util
+ * ps->power * cpu_max_freq cpu_util
* cpu_nrg = ------------------------ * --------- (3)
- * cs->freq scale_cpu
+ * ps->freq scale_cpu
*
- * The first term is static, and is stored in the em_cap_state struct
- * as 'cs->cost'.
+ * The first term is static, and is stored in the em_perf_state struct
+ * as 'ps->cost'.
*
* Since all CPUs of the domain have the same micro-architecture, they
- * share the same 'cs->cost', and the same CPU capacity. Hence, the
+ * share the same 'ps->cost', and the same CPU capacity. Hence, the
* total energy of the domain (which is the simple sum of the energy of
* all of its CPUs) can be factorized as:
*
- * cs->cost * \Sum cpu_util
+ * ps->cost * \Sum cpu_util
* pd_nrg = ------------------------ (4)
* scale_cpu
*/
- return cs->cost * sum_util / scale_cpu;
+ return ps->cost * sum_util / scale_cpu;
}
/**
- * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * em_pd_nr_perf_states() - Get the number of performance states of a perf.
+ * domain
* @pd : performance domain for which this must be done
*
- * Return: the number of capacity states in the performance domain table
+ * Return: the number of performance states in the performance domain table
*/
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
- return pd->nr_cap_states;
+ return pd->nr_perf_states;
}
#else
struct em_data_callback {};
#define EM_DATA_CB(_active_power_cb) { }
-static inline int em_register_perf_domain(cpumask_t *span,
- unsigned int nr_states, struct em_data_callback *cb)
+static inline
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span)
{
return -EINVAL;
}
+static inline void em_dev_unregister_perf_domain(struct device *dev)
+{
+}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
return NULL;
}
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+static inline struct em_perf_domain *em_pd_get(struct device *dev)
+{
+ return NULL;
+}
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util)
{
return 0;
}
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
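
Putting the renamed API together, a driver for a non-CPU device would register its Energy Model roughly as sketched below (the power/frequency values and the probe/remove helpers are made up; em_dev_register_perf_domain(), EM_DATA_CB() and em_dev_unregister_perf_domain() are the interfaces added above, and the cpumask argument is only required for CPU devices):

static int example_active_power(unsigned long *power, unsigned long *freq,
				struct device *dev)
{
	/* Report the lowest performance state at or above *freq:
	 * frequency in kHz, active power in mW (placeholder values). */
	*freq = 200000;
	*power = 150;
	return 0;
}

static struct em_data_callback example_em_cb = EM_DATA_CB(example_active_power);

static int example_probe(struct device *dev)
{
	/* Build a 4-state performance domain; NULL span for non-CPU devices. */
	return em_dev_register_perf_domain(dev, 4, &example_em_cb, NULL);
}

static void example_remove(struct device *dev)
{
	em_dev_unregister_perf_domain(dev);
}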
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 21f5aa0b217f..27828145ca09 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout)
return __retval;
}
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
+{
+ long __retval;
+
+ freezer_do_not_count();
+ __retval = schedule_timeout_interruptible(timeout);
+ freezer_count_unsafe();
+ return __retval;
+}
+
/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
@@ -285,6 +296,9 @@ static inline void set_freezable(void) {}
#define freezable_schedule_timeout_interruptible(timeout) \
schedule_timeout_interruptible(timeout)
+#define freezable_schedule_timeout_interruptible_unsafe(timeout) \
+ schedule_timeout_interruptible(timeout)
+
#define freezable_schedule_timeout_killable(timeout) \
schedule_timeout_killable(timeout)
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index efb3ce892c20..3582176a1eca 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -29,6 +29,7 @@ enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_PERF,
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_PL4,
RAPL_DOMAIN_REG_MAX,
};
@@ -38,12 +39,14 @@ enum rapl_primitives {
ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
+ POWER_LIMIT4,
FW_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
PL2_CLAMP,
+ PL4_ENABLE, /* power limit 4, aka max peak power */
TIME_WINDOW1, /* long term */
TIME_WINDOW2, /* short term */
@@ -65,7 +68,7 @@ struct rapl_domain_data {
unsigned long timestamp;
};
-#define NR_POWER_LIMITS (2)
+#define NR_POWER_LIMITS (3)
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
int prim_id; /* primitive ID used to enable */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 121c104a4090..a30a4b54df52 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -351,7 +351,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
@@ -369,11 +369,17 @@ const struct dev_pm_ops name = { \
* .runtime_resume(), respectively (and analogously for hibernation).
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
+#ifdef CONFIG_PM
+#define pm_ptr(_ptr) (_ptr)
+#else
+#define pm_ptr(_ptr) NULL
+#endif
+
/*
* PM_EVENT_ messages
*
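
Taken together, the __maybe_unused annotation and the new pm_ptr() helper let a driver define its PM ops unconditionally and drop the CONFIG_PM #ifdefs around them. A sketch of the intended usage (driver and function names are hypothetical, in the spirit of the jz4740 MMC conversion mentioned in the changelog):

static int __maybe_unused example_suspend(struct device *dev)
{
	/* Quiesce the hardware. */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* Restore the hardware state. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		/* Evaluates to NULL when CONFIG_PM is not set. */
		.pm	= pm_ptr(&example_pm_ops),
	},
};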
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9ec78ee53652..ee11502a575b 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -95,8 +95,8 @@ struct generic_pm_domain {
struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head master_links; /* Links with PM domain as a master */
- struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head parent_links; /* Links with PM domain as a parent */
+ struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
struct work_struct power_off_work;
@@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
}
struct gpd_link {
- struct generic_pm_domain *master;
- struct list_head master_node;
- struct generic_pm_domain *slave;
- struct list_head slave_node;
+ struct generic_pm_domain *parent;
+ struct list_head parent_node;
+ struct generic_pm_domain *child;
+ struct list_head child_node;
/* Sub-domain's per-master domain performance state */
unsigned int performance_state;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index d5c4a329321d..ee34c553f6bf 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -373,7 +374,11 @@ struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
-void dev_pm_opp_of_register_em(struct cpumask *cpus);
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
+{
+ em_dev_unregister_perf_domain(dev);
+}
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -413,7 +418,13 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
return NULL;
}
-static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+static inline int dev_pm_opp_of_register_em(struct device *dev,
+ struct cpumask *cpus)
+{
+ return -ENOTSUPP;
+}
+
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
}
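
For reference, the now int-returning dev_pm_opp_of_register_em() would typically be called from a driver like this (a sketch; the wrapper and its error handling are illustrative only, and the cpumask may be NULL for non-CPU devices):

static int example_register_em(struct device *dev, struct cpumask *cpus)
{
	int ret;

	/* Build the Energy Model from the device's OPP table. */
	ret = dev_pm_opp_of_register_em(dev, cpus);
	if (ret)
		dev_warn(dev, "Couldn't register the Energy Model: %d\n", ret);

	return ret;
}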