author:    Peter Zijlstra <peterz@infradead.org>  2015-04-17 20:05:30 +0200
committer: Ingo Molnar <mingo@kernel.org>         2015-05-08 12:25:38 +0200
commit:    ff303e66c240ba6269e31817a386995440a18c99
tree:      aa4e506beabc409966d9cda0c722dee41e8d8ce9
parent:    1836ac856e4fb446e48afa4f8cae897d4856b06c
perf: Fix software migrate events
Stephane asked about PERF_COUNT_SW_CPU_MIGRATIONS and I realized it
was broken:
> The problem is that the task isn't actually scheduled while it's being
> migrated (obviously), and if it's not scheduled, the counters aren't
> scheduled either, so there's no observing of the fact.
>
> A further problem with migrations is that many migrations happen from
> softirq context, which is nested inside the 'random' task context of
> whoever happens to run at that time; similarly for the wakeup
> migrations triggered from (soft)irq context. All those end up being
> accounted to the task that's currently running, e.g. your 'ls'.
The patch below cures this by marking a task as migrated at migration
time and accounting the migration on its subsequent sched_in().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
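
[Editor's note: not part of the patch. The fixed counter can be exercised
from userspace with the perf_event_open() syscall. The program below is a
minimal sketch: it opens a PERF_COUNT_SW_CPU_MIGRATIONS counter on the
current task and forces migrations by bouncing its affinity between CPU 0
and CPU 1, so it assumes at least two online CPUs. On a pre-patch kernel
many of these migrations are missed or misattributed; with the patch
applied, the count should track the forced migrations.]

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc provides no perf_event_open() stub. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	cpu_set_t set;
	int fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_MIGRATIONS;

	/* Count migrations of this task, on whatever CPU it runs. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Bounce between CPU0 and CPU1 to force migrations. */
	for (i = 0; i < 10; i++) {
		CPU_ZERO(&set);
		CPU_SET(i & 1, &set);
		sched_setaffinity(0, sizeof(set), &set);
	}

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("migrations counted: %llu\n", (unsigned long long)count);
	return 0;
}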
 include/linux/perf_event.h | 24 ++++++++++++++++++++++++
 include/linux/sched.h      |  7 +++----
 kernel/sched/core.c        |  2 +-
 3 files changed, 29 insertions(+), 4 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61992cf2e977..e86f85abeda7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -798,11 +798,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+		return true;
+	return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+	if (perf_sw_migrate_enabled())
+		task->sched_migrated = 1;
+}
+
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
+
+	if (perf_sw_migrate_enabled() && task->sched_migrated) {
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+		task->sched_migrated = 0;
+	}
 }
 
 static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -925,6 +947,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,
 static inline void *
 perf_get_aux(struct perf_output_handle *handle)			{ return NULL; }
 static inline void
+perf_event_task_migrate(struct task_struct *task)		{ }
+static inline void
 perf_event_task_sched_in(struct task_struct *prev,
 			 struct task_struct *task)		{ }
 static inline void
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26a2e6122734..2c5e6c3db654 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1356,9 +1356,6 @@ struct task_struct {
 #endif
 
 	struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
-	unsigned brk_randomized:1;
-#endif
 	/* per-thread vma caching */
 	u32 vmacache_seqnum;
 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1381,10 +1378,14 @@ struct task_struct {
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
+	unsigned sched_migrated:1;
 
 #ifdef CONFIG_MEMCG_KMEM
 	unsigned memcg_kmem_skip_account:1;
 #endif
+#ifdef CONFIG_COMPAT_BRK
+	unsigned brk_randomized:1;
+#endif
 
 	unsigned long atomic_flags; /* Flags needing atomic access. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe22f7510bce..8652fd540780 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1049,7 +1049,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
-		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
+		perf_event_task_migrate(p);
 	}
 
 	__set_task_cpu(p, new_cpu);
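
[Editor's note: two details of the fix are worth spelling out. First,
set_task_cpu() can run in (soft)irq context on behalf of a task that is
not current, so raising the software event there attributes it to whatever
task happens to be running; deferring to perf_event_task_sched_in() raises
the event in the context of the migrated task itself, with usable caller
regs. Second, perf_sw_migrate_enabled() is built on static_key_false(), so
the extra work in the sched_in() path is patched out entirely unless a
PERF_COUNT_SW_CPU_MIGRATIONS event is actually enabled.]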