Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r--  kernel/sched/topology.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 12af4b157928..4f6fa7553d92 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -10,6 +10,7 @@ DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
 cpumask_var_t sched_domains_tmpmask;
+cpumask_var_t sched_domains_tmpmask2;
 
 #ifdef CONFIG_SCHED_DEBUG
 
@@ -500,13 +501,16 @@ enum s_alloc {
  * Only CPUs that can arrive at this group should be considered to continue
  * balancing.
  */
-static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+static void
+build_group_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
 {
 	const struct cpumask *sg_span = sched_group_cpus(sg);
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
 	int i;
 
+	cpumask_clear(mask);
+
 	for_each_cpu(i, sg_span) {
 		sibling = *per_cpu_ptr(sdd->sd, i);
 
@@ -522,11 +526,11 @@ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
 			continue;
 
-		cpumask_set_cpu(i, sched_group_mask(sg));
+		cpumask_set_cpu(i, mask);
 	}
 
 	/* We must not have empty masks here */
-	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
+	WARN_ON_ONCE(cpumask_empty(mask));
 }
 
 /*
@@ -560,14 +564,19 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 }
 
 static void init_overlap_sched_group(struct sched_domain *sd,
-				     struct sched_group *sg, int cpu)
+				     struct sched_group *sg)
 {
+	struct cpumask *mask = sched_domains_tmpmask2;
 	struct sd_data *sdd = sd->private;
 	struct cpumask *sg_span;
+	int cpu;
+
+	build_group_mask(sd, sg, mask);
+	cpu = cpumask_first_and(sched_group_cpus(sg), mask);
 
 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 	if (atomic_inc_return(&sg->sgc->ref) == 1)
-		build_group_mask(sd, sg);
+		cpumask_copy(sched_group_mask(sg), mask);
 
 	/*
 	 * Initialize sgc->capacity such that even if we mess up the
@@ -619,7 +628,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		sg_span = sched_group_cpus(sg);
 		cpumask_or(covered, covered, sg_span);
 
-		init_overlap_sched_group(sd, sg, i);
+		init_overlap_sched_group(sd, sg);
 
 		if (!first)
 			first = sg;
@@ -1578,6 +1587,7 @@ int sched_init_domains(const struct cpumask *cpu_map)
 	int err;
 
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 	arch_update_cpu_topology();
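
Note: the patch reorders init_overlap_sched_group() so the group mask is computed first, into the new global scratch cpumask sched_domains_tmpmask2, and the CPU used for the per-CPU sgc lookup is then chosen with cpumask_first_and() over the group span and that mask, instead of taking the caller's iteration CPU. The toy userspace sketch below illustrates only that selection step; toy_mask_t, toy_first_and() and the bit patterns are made-up stand-ins for struct cpumask and cpumask_first_and(), not kernel code.

#include <stdio.h>

/* Toy stand-in for a kernel cpumask: bit i set => CPU i is in the mask. */
typedef unsigned long toy_mask_t;

/* Mirrors what cpumask_first_and() computes: the lowest CPU id present
 * in both masks, or -1 if the intersection is empty. */
static int toy_first_and(toy_mask_t a, toy_mask_t b)
{
        toy_mask_t both = a & b;

        return both ? __builtin_ctzl(both) : -1;
}

int main(void)
{
        /* Group span covers CPUs 0-3; suppose the mask built by
         * build_group_mask() decided only CPUs 2-3 can arrive at
         * this group. */
        toy_mask_t sg_span = 0xfUL;     /* 0b1111 */
        toy_mask_t mask    = 0xcUL;     /* 0b1100 */

        /* The patched code picks the first CPU that is in both the
         * group span and the freshly built mask. */
        printf("chosen cpu = %d\n", toy_first_and(sg_span, mask)); /* 2 */

        return 0;
}

The upshot, as visible in the diff: sg->sgc is always taken from a CPU that the just-built mask actually contains, and the mask is copied into sched_group_mask(sg) only on the first reference (atomic_inc_return(&sg->sgc->ref) == 1) rather than being rebuilt there.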