diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-05-04 18:54:32 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-04 19:29:57 +0200 |
commit | b82914ce33146186d554b0f5c41e4e13693614ce (patch) | |
tree | dde12a31830dd4216433d9a6b365c90bccaa1792 /kernel | |
parent | dab6f6a3401f596fe934f41fc5da3f401adfdfb1 (diff) | |
download | lwn-b82914ce33146186d554b0f5c41e4e13693614ce.tar.gz lwn-b82914ce33146186d554b0f5c41e4e13693614ce.zip |
perf_counter: round-robin per-CPU counters too
This used to be unstable when we had the rq->lock dependencies,
but now that they are a thing of the past we can turn on percpu
counter RR too.
[ Impact: handle counter over-commit for per-CPU counters too ]
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 10 |
1 file changed, 3 insertions, 7 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 8660ae579530..b9679c36bcc2 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1069,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct perf_counter_context *ctx = &curr->perf_counter_ctx; - const int rotate_percpu = 0; - if (rotate_percpu) - perf_counter_cpu_sched_out(cpuctx); + perf_counter_cpu_sched_out(cpuctx); perf_counter_task_sched_out(curr, cpu); - if (rotate_percpu) - rotate_ctx(&cpuctx->ctx); + rotate_ctx(&cpuctx->ctx); rotate_ctx(ctx); - if (rotate_percpu) - perf_counter_cpu_sched_in(cpuctx, cpu); + perf_counter_cpu_sched_in(cpuctx, cpu); perf_counter_task_sched_in(curr, cpu); } |