author		Dietmar Eggemann <dietmar.eggemann@arm.com>	2022-06-23 11:11:02 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2022-06-28 09:17:47 +0200
commit		ec4fc801a02d96180c597238fe87141471b70971
tree		a369388a03b4ec7bbae4e28f2f4cdf92d7e9742a
parent		bb4479994945e9170534389a7762eb56149320ac
sched/fair: Rename select_idle_mask to select_rq_mask
On 21/06/2022 11:04, Vincent Donnefort wrote:
> From: Dietmar Eggemann <dietmar.eggemann@arm.com>
The build report at https://lkml.kernel.org/r/202206221253.ZVyGQvPX-lkp@intel.com
shows that this patch no longer builds (on tip sched/core or linux-next)
because of commit f5b2eeb499910 ("sched/fair: Consider CPU affinity when
allowing NUMA imbalance in find_idlest_group()").
New version of [PATCH v11 4/7] sched/fair: Rename select_idle_mask to
select_rq_mask below.
-- >8 --
Decouple the name of the per-cpu cpumask select_idle_mask from its usage
in select_idle_[cpu/capacity]() of the CFS run-queue selection
(select_task_rq_fair()).
This is to support the reuse of this cpumask in the Energy Aware
Scheduling (EAS) path (find_energy_efficient_cpu()) of the CFS run-queue
selection.
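For reference, the pattern the rename keeps intact looks roughly like the
sketch below. This is illustrative only: example_pick_cpu() is a made-up
caller, not the actual find_energy_efficient_cpu() change, and it simply
assumes the kernel's existing per-cpu cpumask helpers.

/*
 * Minimal sketch: any wakeup-path code running with IRQs disabled can
 * borrow the per-cpu scratch mask, which is why its name should not
 * suggest it belongs to select_idle_*() alone.
 */
DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);

static int example_pick_cpu(struct task_struct *p, struct sched_domain *sd)
{
	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
	int cpu;

	/* The scratch mask may only be used with IRQs disabled. */
	lockdep_assert_irqs_disabled();

	/* Limit the search to CPUs the task is allowed to run on. */
	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

	/* Pick the first idle candidate, if any. */
	for_each_cpu(cpu, cpus) {
		if (available_idle_cpu(cpu))
			return cpu;
	}

	return -1;
}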
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lkml.kernel.org/r/250691c7-0e2b-05ab-bedf-b245c11d9400@arm.com
-rw-r--r--	kernel/sched/core.c	 4
-rw-r--r--	kernel/sched/fair.c	10
2 files changed, 7 insertions, 7 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c538a0ac4617..dd69e85b7879 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9536,7 +9536,7 @@ static struct kmem_cache *task_group_cache __read_mostly;
 #endif
 
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
-DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
+DECLARE_PER_CPU(cpumask_var_t, select_rq_mask);
 
 void __init sched_init(void)
 {
@@ -9585,7 +9585,7 @@ void __init sched_init(void)
 	for_each_possible_cpu(i) {
 		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
-		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
+		per_cpu(select_rq_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
 #endif /* CONFIG_CPUMASK_OFFSTACK */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6de09b26b455..e3f750135f78 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5894,7 +5894,7 @@ dequeue_throttle:
 
 /* Working cpumask for: load_balance, load_balance_newidle. */
 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
-DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
+DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
 
 #ifdef CONFIG_NO_HZ_COMMON
 
@@ -6384,7 +6384,7 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
 {
-	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 	int i, cpu, idle_cpu = -1, nr = INT_MAX;
 	struct sched_domain_shared *sd_share;
 	struct rq *this_rq = this_rq();
@@ -6482,7 +6482,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 	int cpu, best_cpu = -1;
 	struct cpumask *cpus;
 
-	cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+	cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	task_util = uclamp_task_util(p);
@@ -6532,7 +6532,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	}
 
 	/*
-	 * per-cpu select_idle_mask usage
+	 * per-cpu select_rq_mask usage
 	 */
 	lockdep_assert_irqs_disabled();
 
@@ -9255,7 +9255,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	 * take care of it.
 	 */
 	if (p->nr_cpus_allowed != NR_CPUS) {
-		struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+		struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 
 		cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
 		imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);