author		Venkatesh Pallipadi <venki@google.com>	2011-02-10 10:23:27 +0100
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-02-17 15:37:27 -0800
commit		c8c885599ad2115e0a2fe661c2fb6ba4edc92c19 (patch)
tree		335a51d3c4b687621eb68cd5c6123d9c947a507a /kernel/sched.c
parent		3a69989d43689a40f3af7cad04c5aa840f3d2530 (diff)
download	lwn-c8c885599ad2115e0a2fe661c2fb6ba4edc92c19.tar.gz
		lwn-c8c885599ad2115e0a2fe661c2fb6ba4edc92c19.zip
sched: Remove irq time from available CPU power
Commit: aa483808516ca5cacfa0e5849691f64fec25828e upstream
The idea was suggested by Peter Zijlstra here:
http://marc.info/?l=linux-kernel&m=127476934517534&w=2
Time spent in irq processing is not actually available to the tasks running
on the CPU. This patch removes irq time from the available CPU power,
piggybacking on sched_rt_avg_update().
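
For context, the effect reaches the load balancer through scale_rt_power(),
which converts the decayed rt (and, after this patch, irq) time into a
fraction of SCHED_LOAD_SCALE. A minimal user-space sketch of that
calculation follows; model_scale_rt_power() and the sample numbers are
illustrative only, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024ULL

/*
 * Model of scale_rt_power(): given the total wall-clock window and the
 * decayed time eaten by rt tasks plus irq processing, return the share
 * of SCHED_LOAD_SCALE left over for fair-class tasks.
 */
static uint64_t model_scale_rt_power(uint64_t total, uint64_t rt_and_irq_avg)
{
        uint64_t available = total > rt_and_irq_avg ? total - rt_and_irq_avg : 0;

        if (total < SCHED_LOAD_SCALE)
                total = SCHED_LOAD_SCALE;

        return available * SCHED_LOAD_SCALE / total;  /* div_u64() in the kernel */
}

int main(void)
{
        /* A CPU spending 75% of its time in hard+soft irq context: */
        printf("power: %llu of %llu\n",
               (unsigned long long)model_scale_rt_power(1000000, 750000),
               (unsigned long long)SCHED_LOAD_SCALE);  /* prints ~256 of 1024 */
        return 0;
}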
Tested this by keeping CPU X busy with a network-intensive task that puts
75% of a single CPU's worth of irq processing (hard+soft) on it, on a 4-way
system, and then starting seven cycle soakers. Without this change, there
were two tasks on each CPU. With this change, there is a single task on the
irq-busy CPU X and the remaining seven tasks are spread among the other
three CPUs.
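
For a rough sense of the numbers behind that placement: with the default
SCHED_LOAD_SCALE of 1024, a CPU losing 75% of its time to irq processing is
left with cpu_power of about 0.25 * 1024 = 256, while the other three CPUs
stay near 1024, so the balancer treats CPU X as worth roughly a quarter of
a CPU and leaves it only the irq-heavy task.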
Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-8-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5761f098a4bf..4f8609420bec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,6 +553,10 @@ struct rq {
 	u64 avg_idle;
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif
+
 	/* calc_load related fields */
 	unsigned long calc_load_update;
 	long calc_load_active;
@@ -622,6 +626,7 @@ static inline int cpu_of(struct rq *rq)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
 static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
 inline void update_rq_clock(struct rq *rq)
 {
@@ -632,6 +637,8 @@ inline void update_rq_clock(struct rq *rq)
 	irq_time = irq_time_cpu(cpu);
 	if (rq->clock - irq_time > rq->clock_task)
 		rq->clock_task = rq->clock - irq_time;
+
+	sched_irq_time_avg_update(rq, irq_time);
 }
 
 /*
@@ -1883,6 +1890,15 @@ void account_system_vtime(struct task_struct *curr)
 	local_irq_restore(flags);
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+	if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+		u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+		rq->prev_irq_time = curr_irq_time;
+		sched_rt_avg_update(rq, delta_irq);
+	}
+}
+
 #else
 
 static u64 irq_time_cpu(int cpu)
@@ -1890,6 +1906,8 @@ static u64 irq_time_cpu(int cpu)
 	return 0;
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
 #endif
 
 #include "sched_stats.h"
@@ -3755,7 +3773,13 @@ unsigned long scale_rt_power(int cpu)
 	u64 total, available;
 
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
-	available = total - rq->rt_avg;
+
+	if (unlikely(total < rq->rt_avg)) {
+		/* Ensures that power won't end up being negative */
+		available = 0;
+	} else {
+		available = total - rq->rt_avg;
+	}
 
 	if (unlikely((s64)total < SCHED_LOAD_SCALE))
 		total = SCHED_LOAD_SCALE;
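
The piggybacking works because sched_rt_avg_update() just folds a time delta
into rq->rt_avg, and sched_avg_update() halves that sum once per averaging
period. Below is a rough user-space model of the combined mechanism,
assuming a fixed ~1s period; every model_* name is an illustrative stand-in,
not a kernel API:

#include <stdint.h>

/* Illustrative stand-ins for the runqueue fields the patch touches. */
struct model_rq {
        uint64_t age_stamp;     /* start of the current averaging window */
        uint64_t rt_avg;        /* decayed rt + irq time, in ns */
        uint64_t prev_irq_time; /* last snapshot of the per-cpu irq counter */
};

#define MODEL_AVG_PERIOD 1000000000ULL  /* ~1s, standing in for sched_avg_period() */

/* Halve rt_avg once per elapsed period, as sched_avg_update() does. */
static void model_avg_update(struct model_rq *rq, uint64_t now)
{
        while (now - rq->age_stamp > MODEL_AVG_PERIOD) {
                rq->rt_avg /= 2;
                rq->age_stamp += MODEL_AVG_PERIOD;
        }
}

/*
 * The patch's core idea: fold the growth of the irq-time counter into
 * the same decaying average that rt task time already feeds, so that
 * scale_rt_power() subtracts both from the CPU's usable capacity.
 */
static void model_irq_time_avg_update(struct model_rq *rq,
                                      uint64_t curr_irq_time, uint64_t now)
{
        uint64_t delta_irq = curr_irq_time - rq->prev_irq_time;

        rq->prev_irq_time = curr_irq_time;
        rq->rt_avg += delta_irq;
        model_avg_update(rq, now);
}

int main(void)
{
        struct model_rq rq = { 0, 0, 0 };

        /* 1.5s in: the irq counter has accumulated 750ms of irq time. */
        model_irq_time_avg_update(&rq, 750000000ULL, 1500000000ULL);
        /* rq.rt_avg is now 375000000 (one halving applied). */
        return 0;
}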