author     Tony Lindgren <tony@atomide.com>    2015-11-25 10:56:40 -0800
committer  Tony Lindgren <tony@atomide.com>    2015-11-25 10:56:40 -0800
commit     970259bff472579204108c6f27036ec4d1206ae1 (patch)
tree       869bdcda9aac4c67712d93e6141056fdf3f41bd8 /drivers/cpufreq
parent     9b1b61cd8e31d9beba871333d7a798b3adb89288 (diff)
parent     29f5b34ca1a191c2cf4f6c8c12f4dec56e8d3bc1 (diff)
Merge branch '81xx' into omap-for-v4.4/fixes
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig.arm        |   1
-rw-r--r--  drivers/cpufreq/Kconfig.x86        |   1
-rw-r--r--  drivers/cpufreq/arm_big_little.c   |  22
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c     |   3
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c |  33
-rw-r--r--  drivers/cpufreq/intel_pstate.c     | 326
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c  |   4
7 files changed, 118 insertions, 272 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1582c1c016b0..8014c2307332 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
 	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,7 +5,6 @@
 config X86_INTEL_PSTATE
 	bool "Intel P state control"
 	depends on X86
-	select ACPI_PROCESSOR if ACPI
 	help
 	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index f1e42f8ce0fc..c5d256caa664 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -149,6 +149,19 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
 		 __func__, cpu, old_cluster, new_cluster, new_rate);
 
 	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+	if (!ret) {
+		/*
+		 * FIXME: clk_set_rate hasn't returned an error here however it
+		 * may be that clk_change_rate failed due to hardware or
+		 * firmware issues and wasn't able to report that due to the
+		 * current design of the clk core layer. To work around this
+		 * problem we will read back the clock rate and check it is
+		 * correct. This needs to be removed once clk core is fixed.
+		 */
+		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+			ret = -EIO;
+	}
+
 	if (WARN_ON(ret)) {
 		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
 		       new_cluster);
@@ -189,15 +202,6 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
 		mutex_unlock(&cluster_lock[old_cluster]);
 	}
 
-	/*
-	 * FIXME: clk_set_rate has to handle the case where clk_change_rate
-	 * can fail due to hardware or firmware issues. Until the clk core
-	 * layer is fixed, we can check here. In most of the cases we will
-	 * be reading only the cached value anyway. This needs to be removed
-	 * once clk core is fixed.
-	 */
-	if (bL_cpufreq_get_rate(cpu) != new_rate)
-		return -EIO;
 
 	return 0;
 }
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 93c219fab850..e8cb334094b0 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -166,8 +166,7 @@ static int __init cppc_cpufreq_init(void)
 
 out:
 	for_each_possible_cpu(i)
-		if (all_cpu_data[i])
-			kfree(all_cpu_data[i]);
+		kfree(all_cpu_data[i]);
 
 	kfree(all_cpu_data);
 	return -ENODEV;
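Note: the cppc_cpufreq.c hunk works because kfree(NULL) is defined to be a no-op, so the guarding "if" was redundant. A minimal userspace sketch of the same idiom, using standard free() (whose NULL behaviour the C standard guarantees) rather than any kernel API:

#include <stdlib.h>

int main(void)
{
	int *data[4] = { NULL };	/* only some slots get allocated */

	data[0] = malloc(sizeof(int));
	data[2] = malloc(sizeof(int));

	/* No NULL check needed: free(NULL), like kfree(NULL), does nothing. */
	for (int i = 0; i < 4; i++)
		free(data[i]);

	return 0;
}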
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 11258c4c1b17..b260576ddb12 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -171,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
-	mutex_lock(&cpufreq_governor_lock);
-	if (!policy->governor_enabled)
-		goto out_unlock;
-
 	if (!all_cpus) {
 		/*
 		 * Use raw_smp_processor_id() to avoid preemptible warnings.
@@ -188,9 +184,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
 	}
-
-out_unlock:
-	mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
@@ -229,13 +222,24 @@ static void dbs_timer(struct work_struct *work)
 	struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
 						 dwork.work);
 	struct cpu_common_dbs_info *shared = cdbs->shared;
-	struct cpufreq_policy *policy = shared->policy;
-	struct dbs_data *dbs_data = policy->governor_data;
+	struct cpufreq_policy *policy;
+	struct dbs_data *dbs_data;
 	unsigned int sampling_rate, delay;
 	bool modify_all = true;
 
 	mutex_lock(&shared->timer_mutex);
 
+	policy = shared->policy;
+
+	/*
+	 * Governor might already be disabled and there is no point continuing
+	 * with the work-handler.
+	 */
+	if (!policy)
+		goto unlock;
+
+	dbs_data = policy->governor_data;
+
 	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
 		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -252,6 +256,7 @@ static void dbs_timer(struct work_struct *work)
 	delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
 	gov_queue_work(dbs_data, policy, delay, modify_all);
 
+unlock:
 	mutex_unlock(&shared->timer_mutex);
 }
 
@@ -478,9 +483,17 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 	if (!shared || !shared->policy)
 		return -EBUSY;
 
+	/*
+	 * Work-handler must see this updated, as it should not proceed any
+	 * further after governor is disabled. And so timer_mutex is taken while
+	 * updating this value.
+	 */
+	mutex_lock(&shared->timer_mutex);
+	shared->policy = NULL;
+	mutex_unlock(&shared->timer_mutex);
+
 	gov_cancel_work(dbs_data, policy);
 
-	shared->policy = NULL;
 	mutex_destroy(&shared->timer_mutex);
 	return 0;
 }
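Note: the cpufreq_governor.c change replaces the old governor_enabled check under cpufreq_governor_lock with a simpler protocol: the stop path clears shared->policy under timer_mutex, and the work handler re-reads the pointer under the same mutex and quits early if it is NULL. A minimal pthreads sketch of that protocol; the names (shared_policy, worker, stop_governor) are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *shared_policy = "policy0";

static void *worker(void *arg)
{
	pthread_mutex_lock(&timer_mutex);
	if (!shared_policy) {		/* "governor" already stopped */
		pthread_mutex_unlock(&timer_mutex);
		return NULL;
	}
	printf("working on %s\n", shared_policy);
	pthread_mutex_unlock(&timer_mutex);
	return NULL;
}

static void stop_governor(void)
{
	/* Publish the disabled state under the same lock the worker takes. */
	pthread_mutex_lock(&timer_mutex);
	shared_policy = NULL;
	pthread_mutex_unlock(&timer_mutex);
	/* ...then cancel/flush outstanding work, as the kernel code does. */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	stop_governor();
	pthread_join(t, NULL);
	return 0;
}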
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a3c635ea27..001a532e342e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
-#endif
-
-#define BYT_RATIOS		0x66a
-#define BYT_VIDS		0x66b
-#define BYT_TURBO_RATIOS	0x66c
-#define BYT_TURBO_VIDS		0x66d
+#define ATOM_RATIOS		0x66a
+#define ATOM_VIDS		0x66b
+#define ATOM_TURBO_RATIOS	0x66c
+#define ATOM_TURBO_VIDS		0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
 	u64	prev_mperf;
 	u64	prev_tsc;
 	struct sample sample;
-#if IS_ENABLED(CONFIG_ACPI)
-	struct acpi_processor_performance acpi_perf_data;
-#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
-static int no_acpi_perf;
 
 struct perf_limits {
 	int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
-	int max_perf_ctl;
-	int min_perf_ctl;
 };
 
 static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
-	.max_perf_ctl = 0,
-	.min_perf_ctl = 0,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
-#if IS_ENABLED(CONFIG_ACPI)
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-	return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu;
-	int ret;
-	bool turbo_absent = false;
-	int max_pstate_index;
-	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
-	int i;
-
-	cpu = all_cpu_data[policy->cpu];
-
-	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
-		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-		 cpu->pstate.turbo_pstate);
-
-	if (!cpu->acpi_perf_data.shared_cpu_map &&
-	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
-				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
-		return -ENOMEM;
-	}
-
-	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
-						  policy->cpu);
-	if (ret)
-		return ret;
-
-	/*
-	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
-	 * guarantee that the states returned by it map to the states in our
-	 * list directly.
-	 */
-	if (cpu->acpi_perf_data.control_register.space_id !=
-	    ACPI_ADR_SPACE_FIXED_HARDWARE)
-		return -EIO;
-
-	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
-		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
-			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
-			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
-			 (u32) cpu->acpi_perf_data.states[i].power,
-			 (u32) cpu->acpi_perf_data.states[i].control);
-
-	/*
-	 * If there is only one entry _PSS, simply ignore _PSS and continue as
-	 * usual without taking _PSS into account
-	 */
-	if (cpu->acpi_perf_data.state_count < 2)
-		return 0;
-
-	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-	min_pss_ctl = convert_to_native_pstate_format(cpu,
-					cpu->acpi_perf_data.state_count - 1);
-	/* Check if there is a turbo freq in _PSS */
-	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
-	    turbo_pss_ctl > cpu->pstate.min_pstate) {
-		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-		limits->no_turbo = limits->turbo_disabled = 1;
-		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
-		turbo_absent = true;
-	}
-
-	/* Check if the max non turbo p state < Intel P state max */
-	max_pstate_index = turbo_absent ? 0 : 1;
-	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
-	if (max_pss_ctl < cpu->pstate.max_pstate &&
-	    max_pss_ctl > cpu->pstate.min_pstate)
-		cpu->pstate.max_pstate = max_pss_ctl;
-
-	/* check If min perf > Intel P State min */
-	if (min_pss_ctl > cpu->pstate.min_pstate &&
-	    min_pss_ctl < cpu->pstate.max_pstate) {
-		cpu->pstate.min_pstate = min_pss_ctl;
-		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
-	}
-
-	if (turbo_absent)
-		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
-						cpu->pstate.scaling;
-	else {
-		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
-						cpu->pstate.scaling;
-		/*
-		 * The _PSS table doesn't contain whole turbo frequency range.
-		 * This just contains +1 MHZ above the max non turbo frequency,
-		 * with control value corresponding to max turbo ratio. But
-		 * when cpufreq set policy is called, it will call with this
-		 * max frequency, which will cause a reduced performance as
-		 * this driver uses real max turbo frequency as the max
-		 * frequeny. So correct this frequency in _PSS table to
-		 * correct max turbo frequency based on the turbo ratio.
-		 * Also need to convert to MHz as _PSS freq is in MHz.
-		 */
-		cpu->acpi_perf_data.states[0].core_frequency =
-							turbo_pss_ctl * 100;
-	}
-
-	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
-		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-		 cpu->pstate.turbo_pstate);
-	pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
-		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
-
-	return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu;
-
-	if (!no_acpi_perf)
-		return 0;
-
-	cpu = all_cpu_data[policy->cpu];
-	acpi_processor_unregister_performance(policy->cpu);
-	return 0;
-}
-
-#else
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-#endif
-
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral)
 {
 	pid->setpoint = setpoint;
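Note: the comment removed above documents the format mismatch the deleted helper bridged: _PSS control values use the PERF_CTL layout, with the target ratio in bits 15:8 (0x1700 means ratio 0x17), while intel_pstate stores bare ratios. A standalone sketch of that conversion, with a made-up sample value:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the deleted convert_to_native_pstate_format(): keep only
 * the ratio byte from a PERF_CTL-style control value. */
static int pss_control_to_ratio(uint16_t control)
{
	return (control >> 8) & 0xff;
}

int main(void)
{
	printf("0x%04x -> ratio 0x%02x\n", 0x1700, pss_control_to_ratio(0x1700));
	/* prints: 0x1700 -> ratio 0x17 */
	return 0;
}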
@@ -684,36 +525,34 @@ static void __init intel_pstate_sysfs_expose_params(void)
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	pr_info("intel_pstate: HWP enabled\n");
-
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_TURBO_RATIOS, value);
+	rdmsrl(ATOM_TURBO_RATIOS, value);
 	return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
@@ -738,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS	5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
 	u64 value;
 	int i;
+	/* Defined in Table 35-6 from SDM (Sept 2015) */
+	static int silvermont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000};
 
 	rdmsrl(MSR_FSB_FREQ, value);
-	i = value & 0x3;
+	i = value & 0x7;
+	WARN_ON(i > 4);
 
-	BUG_ON(i > BYT_BCLK_FREQS);
+	return silvermont_freq_table[i];
+}
 
-	return byt_freq_table[i] * 100;
+static int airmont_get_scaling(void)
+{
+	u64 value;
+	int i;
+	/* Defined in Table 35-10 from SDM (Sept 2015) */
+	static int airmont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000,
+		93300, 90000, 88900, 87500};
+
+	rdmsrl(MSR_FSB_FREQ, value);
+	i = value & 0xF;
+	WARN_ON(i > 8);
+
+	return airmont_freq_table[i];
 }
 
-static void byt_get_vid(struct cpudata *cpudata)
+static void atom_get_vid(struct cpudata *cpudata)
 {
 	u64 value;
 
-	rdmsrl(BYT_VIDS, value);
+	rdmsrl(ATOM_VIDS, value);
 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
 	cpudata->vid.ratio = div_fp(
@@ -766,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
 		int_tofp(cpudata->pstate.max_pstate -
 			 cpudata->pstate.min_pstate));
 
-	rdmsrl(BYT_TURBO_VIDS, value);
+	rdmsrl(ATOM_TURBO_VIDS, value);
 	cpudata->vid.turbo = value & 0x7f;
 }
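Note: the rewritten get_scaling() helpers decode the low bits of MSR_FSB_FREQ into a bus-clock value in kHz via the SDM tables quoted above (3 bits / 5 entries on Silvermont, 4 bits / 9 on Airmont); a P-state ratio times this value gives a frequency in kHz. A userspace sketch of the Airmont decode with a hard-coded MSR value; the kernel reads the real register with rdmsrl() and only WARNs on out-of-range encodings, whereas this sketch falls back to entry 0:

#include <stdio.h>
#include <stdint.h>

static const int airmont_freq_table[] = {
	83300, 100000, 133300, 116700, 80000,
	93300, 90000, 88900, 87500,
};

/* msr is a stand-in for the value rdmsrl(MSR_FSB_FREQ, ...) would return. */
static int airmont_scaling(uint64_t msr)
{
	unsigned int i = msr & 0xF;	/* low 4 bits select the table entry */

	if (i > 8) {			/* reserved encoding */
		fprintf(stderr, "unexpected FSB_FREQ field %u\n", i);
		i = 0;
	}
	return airmont_freq_table[i];
}

int main(void)
{
	/* Encoding 2 selects a 133.3 MHz bus, i.e. scaling = 133300 kHz;
	 * a ratio of 24 would then mean 24 * 133300 = 3199200 kHz. */
	printf("scaling = %d kHz\n", airmont_scaling(0x2));
	return 0;
}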
@@ -887,7 +741,7 @@ static struct cpu_defaults core_params = {
 	},
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
 		.deadband = 0,
@@ -897,13 +751,33 @@ static struct cpu_defaults byt_params = {
 		.i_gain_pct = 4,
 	},
 	.funcs = {
-		.get_max = byt_get_max_pstate,
-		.get_max_physical = byt_get_max_pstate,
-		.get_min = byt_get_min_pstate,
-		.get_turbo = byt_get_turbo_pstate,
-		.set = byt_set_pstate,
-		.get_scaling = byt_get_scaling,
-		.get_vid = byt_get_vid,
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = silvermont_get_scaling,
+		.get_vid = atom_get_vid,
+	},
+};
+
+static struct cpu_defaults airmont_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 60,
+		.p_gain_pct = 14,
+		.d_gain_pct = 0,
+		.i_gain_pct = 4,
+	},
+	.funcs = {
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = airmont_get_scaling,
+		.get_vid = atom_get_vid,
 	},
 };
 
@@ -940,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-					limits->max_policy_pct) {
-		*max = limits->max_perf_ctl;
-	} else {
-		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-					limits->max_perf));
-		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-			       cpu->pstate.turbo_pstate);
-	}
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+	*max = clamp_t(int, max_perf_adj,
+		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-	if (limits->min_perf_ctl) {
-		*min = limits->min_perf_ctl;
-	} else {
-		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-				    limits->min_perf));
-		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-	}
+	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
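Note: the simplified intel_pstate_get_min_max() works purely in the driver's 8.8 fixed-point format (FRAC_BITS = 8): a percentage limit is pre-divided into a fixed-point fraction, multiplied by the maximum P-state, then clamped to the hardware range. A self-contained rework of that arithmetic with made-up sample numbers:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y)
{
	return (x * y) >> FRAC_BITS;
}

static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;
}

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	/* Sample hardware range: pstates 8..24, turbo up to 32. */
	int min_pstate = 8, max_pstate = 24, turbo_pstate = 32;
	/* An 80% cap, stored as an 8.8 fixed-point fraction. */
	int64_t max_perf = div_fp(int_tofp(80), int_tofp(100));

	int max_adj = fp_toint(mul_fp(int_tofp(max_pstate), max_perf));

	max_adj = clamp_int(max_adj, min_pstate, turbo_pstate);
	printf("80%% of pstate %d -> pstate %d\n", max_pstate, max_adj);
	/* 24 * 0.8 is about 19.2, truncated to 19, already inside [8, 32]. */
	return 0;
}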
@@ -1155,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, core_params),
 	ICPU(0x2d, core_params),
-	ICPU(0x37, byt_params),
+	ICPU(0x37, silvermont_params),
 	ICPU(0x3a, core_params),
 	ICPU(0x3c, core_params),
 	ICPU(0x3d, core_params),
@@ -1164,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
 	ICPU(0x47, core_params),
-	ICPU(0x4c, byt_params),
+	ICPU(0x4c, airmont_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
 	ICPU(0x5e, core_params),
@@ -1231,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-	struct cpudata *cpu;
-	int i;
-#endif
-	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-		 policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
@@ -1272,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
 				  int_tofp(100));
 
-#if IS_ENABLED(CONFIG_ACPI)
-	cpu = all_cpu_data[policy->cpu];
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-		int control;
-
-		control = convert_to_native_pstate_format(cpu, i);
-		if (control * cpu->pstate.scaling == policy->max)
-			limits->max_perf_ctl = control;
-		if (control * cpu->pstate.scaling == policy->min)
-			limits->min_perf_ctl = control;
-	}
-
-	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-		 limits->max_perf_ctl);
-#endif
-
 	if (hwp_active)
 		intel_pstate_hwp_set();
 
@@ -1343,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
-	if (!no_acpi_perf)
-		intel_pstate_init_perf_limits(policy);
-	/*
-	 * If there is no acpi perf data or error, we ignore and use Intel P
-	 * state calculated limits, So this is not fatal error.
-	 */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
-{
-	return intel_pstate_exit_perf_limits(policy);
-}
-
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
-	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1408,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1557,8 +1386,10 @@ static int __init intel_pstate_init(void)
 	if (!all_cpu_data)
 		return -ENOMEM;
 
-	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
+	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+		pr_info("intel_pstate: HWP enabled\n");
 		hwp_active++;
+	}
 
 	if (!hwp_active && hwp_only)
 		goto out;
@@ -1593,15 +1424,14 @@ static int __init intel_pstate_setup(char *str)
 	if (!strcmp(str, "disable"))
 		no_load = 1;
-	if (!strcmp(str, "no_hwp"))
+	if (!strcmp(str, "no_hwp")) {
+		pr_info("intel_pstate: HWP disabled\n");
 		no_hwp = 1;
+	}
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
-	if (!strcmp(str, "no_acpi"))
-		no_acpi_perf = 1;
-
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 9e231f52150c..051a8a8224cd 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -212,11 +212,11 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
 	/* Find current DRAM frequency */
 	tmp = s5pv210_dram_conf[ch].freq;
 
-	do_div(tmp, freq);
+	tmp /= freq;
 
 	tmp1 = s5pv210_dram_conf[ch].refresh;
 
-	do_div(tmp1, tmp);
+	tmp1 /= tmp;
 
 	__raw_writel(tmp1, reg);
 }
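Note: do_div() exists so 32-bit kernels can divide a 64-bit dividend, and it requires a u64 lvalue; using it on the unsigned long values in s5pv210_set_refresh() was wrong, and since every operand fits in unsigned long, plain C division is both correct and clearer. A userspace illustration of the refresh-counter arithmetic, with invented sample values:

#include <stdio.h>

int main(void)
{
	unsigned long dram_freq = 166000;	/* kHz, sample value */
	unsigned long refresh = 7800 * 1000;	/* refresh period, sample value */
	unsigned long target_freq = 83000;	/* kHz, sample value */

	unsigned long tmp = dram_freq / target_freq;	/* frequency ratio */
	unsigned long tmp1 = refresh / tmp;		/* scaled refresh count */

	printf("ratio=%lu, refresh count=%lu\n", tmp, tmp1);
	return 0;
}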