author    Rusty Russell <rusty@rustcorp.com.au>  2008-11-25 02:35:10 +1030
committer Ingo Molnar <mingo@elte.hu>            2008-11-24 17:51:30 +0100
commit    4d2732c63e0c05cfef2a74868d08eace922dfc3e (patch)
tree      775cd2e595d09ef1f5ae9c3ea66cf69374eed647 /kernel/sched.c
parent    7d1e6a9b95e3edeac91888bc683ae62f18519432 (diff)
download  lwn-4d2732c63e0c05cfef2a74868d08eace922dfc3e.tar.gz
          lwn-4d2732c63e0c05cfef2a74868d08eace922dfc3e.zip
sched: convert idle_balance() to cpumask_var_t.
Impact: stack usage reduction

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space on the stack.  cpumask_var_t is just a struct cpumask for
!CONFIG_CPUMASK_OFFSTACK.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
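The conversion pattern here is not specific to the scheduler: an on-stack
cpumask_t becomes a cpumask_var_t, which must be set up with
alloc_cpumask_var() before use (bailing out if the allocation fails) and
released with free_cpumask_var() on every exit path.  A minimal sketch of
that pattern, using a hypothetical helper rather than the idle_balance()
code itself:

/* Hypothetical example of the cpumask_var_t pattern; not part of the patch. */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static void example_walk_online_cpus(void)
{
	cpumask_var_t mask;	/* was: cpumask_t mask; 512 bytes on stack with NR_CPUS=4096 */
	int cpu;

	/*
	 * With CONFIG_CPUMASK_OFFSTACK this kmalloc()s a struct cpumask;
	 * otherwise it is a one-element stack array and always succeeds.
	 * GFP_ATOMIC mirrors idle_balance(), which cannot sleep here.
	 */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return;

	cpumask_copy(mask, cpu_online_mask);
	for_each_cpu(cpu, mask) {
		/* per-CPU work would go here */
	}

	free_cpumask_var(mask);	/* must pair with alloc_cpumask_var() */
}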
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 2f8ea99df16a..154a95fcea7e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3676,7 +3676,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = -1;
 	unsigned long next_balance = jiffies + HZ;
-	cpumask_t tmpmask;
+	cpumask_var_t tmpmask;
+
+	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
+		return;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -3687,7 +3690,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, &tmpmask);
+							   sd, tmpmask);
 
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -3702,6 +3705,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		 */
 		this_rq->next_balance = next_balance;
 	}
+	free_cpumask_var(tmpmask);
 }
 
 /*
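For reference, the second hunk drops the "&" in "&tmpmask" because
cpumask_var_t is already pointer-like in both configurations.  The two
definitions are along these lines (a sketch of include/linux/cpumask.h,
not part of this patch):

#ifdef CONFIG_CPUMASK_OFFSTACK
/* A real pointer; alloc_cpumask_var() kmalloc()s the underlying struct. */
typedef struct cpumask *cpumask_var_t;
#else
/*
 * An array of one struct cpumask: it lives wherever it is declared (here,
 * the stack), decays to struct cpumask * when passed to functions, and the
 * alloc/free calls compile away to "always succeeds" / nothing.
 */
typedef struct cpumask cpumask_var_t[1];
#endif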