summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2010-02-24 17:35:13 +0100
committerThomas Gleixner <tglx@linutronix.de>2010-02-24 17:35:13 +0100
commit2e5f15f4c51efd5fdb1ea97380c38d3f8c32d3e9 (patch)
treea16390eb35fbbe648b84899bbc35ff1f1dc51467 /include
parent8452259b8aefef73981b2ffbc23d5245d19bd140 (diff)
downloadlwn-2e5f15f4c51efd5fdb1ea97380c38d3f8c32d3e9.tar.gz
lwn-2e5f15f4c51efd5fdb1ea97380c38d3f8c32d3e9.zip
perf_events: defer poll() wakeups to softirq
Use timer softirq for wakeups on preempt_rt. Normally pending work is work that cannot be done from NMI context, such as wakeups and disabling the counter. The pending work is a single linked list using atomic ops so that it functions from NMI context. Normally this is called from IRQ context through use of a self-IPI (x86) or upon enabling hard interrupts (powerpc). Architectures that do not implement perf_event_set_pending() nor call perf_event_do_pending() upon leaving NMI context will get a polling fallback from the timer softirq. However, in -rt we cannot do the wakeup from IRQ context because it's a wait_queue wakeup, which can be O(n), so defer all wakeups to the softirq fallback by creating a second pending list that's only processed from there. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r--include/linux/perf_event.h5
1 file changed, 5 insertions, 0 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a177698d95e2..f57b3abc5e0e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -645,6 +645,9 @@ struct perf_event {
int pending_kill;
int pending_disable;
struct perf_pending_entry pending;
+#ifdef CONFIG_PREEMPT_RT
+ struct perf_pending_entry pending_softirq;
+#endif
atomic_t event_limit;
@@ -753,6 +756,7 @@ extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
+extern void perf_event_do_pending_softirq(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
@@ -883,6 +887,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_do_pending(void) { }
+static inline void perf_event_do_pending_softirq(void) { }
static inline void perf_event_print_debug(void) { }
static inline void perf_disable(void) { }
static inline void perf_enable(void) { }