| author | Ingo Molnar <mingo@elte.hu> | 2010-09-21 13:56:43 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-09-21 13:56:49 +0200 |
| commit | cf84fd96323633be7d437e1de4505fc82ff2c11a (patch) | |
| tree | 094b7a7421945c9ea244f3605378065935f1c100 /kernel/sched_fair.c | |
| parent | 31915ab4cbf507aadab40847cf9989da5e88b090 (diff) | |
| parent | b30a3f6257ed2105259b404d419b4964e363928c (diff) | |
Merge commit 'v2.6.36-rc5' into sched/core
Merge reason: Pick up the latest fixes in -rc5.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 11
1 file changed, 4 insertions(+), 7 deletions(-)
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661ebc4895..a171138a9402 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		  int this_cpu, int load_idx)
 {
-	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+	struct sched_group *idlest = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 		if (local_group) {
 			this_load = avg_load;
-			this = group;
 		} else if (avg_load < min_load) {
 			min_load = avg_load;
 			idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	u64 total, available;
 
-	sched_avg_update(rq);
-
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
 	available = total - rq->rt_avg;
```
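The two value changes in the first hunk preserve the invariant named in the comment: sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity. Both the old pair (2 ms granularity, ratio 3) and the new pair (0.75 ms, ratio 8) imply a 6 ms sysctl_sched_latency, so the merge keeps the same latency target and simply slices it more finely. Below is a minimal user-space sketch of how CFS's __sched_period() uses these tunables; the 6 ms latency is inferred from that ratio rather than shown in the hunk, and the equal-size slices are a simplification (the kernel weights slices by task load):

```c
#include <stdio.h>
#include <stdint.h>

/* Post-merge defaults; the 6 ms latency is inferred from the
 * ratio in the diff, not shown in the hunk itself. */
static const uint64_t sysctl_sched_latency         = 6000000ULL; /* 6 ms */
static const uint64_t sysctl_sched_min_granularity =  750000ULL; /* 0.75 ms */
static const unsigned long sched_nr_latency        = 8;          /* 6 ms / 0.75 ms */

/* Models __sched_period(): one latency window is shared by all
 * runnable tasks until slices would fall below the minimum
 * granularity; past that point the window stretches instead. */
static uint64_t sched_period(unsigned long nr_running)
{
	if (nr_running > sched_nr_latency)
		return nr_running * sysctl_sched_min_granularity;
	return sysctl_sched_latency;
}

int main(void)
{
	for (unsigned long nr = 1; nr <= 12; nr++) {
		uint64_t period = sched_period(nr);
		printf("nr_running=%2lu  period=%8llu ns  slice=%8llu ns\n",
		       nr, (unsigned long long)period,
		       (unsigned long long)(period / nr));
	}
	return 0;
}
```

Up to sched_nr_latency runnable tasks share one latency period; beyond that the period grows so that no slice drops below sysctl_sched_min_granularity, which is why the two tunables always move together.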