path: root/include/linux/perf_counter.h
author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-03-23 18:22:10 +0100
committer  Ingo Molnar <mingo@elte.hu>                2009-04-06 09:30:27 +0200
commit     7b732a75047738e4f85438ed2f9cd34bf5f2a19a (patch)
tree       bae36de785ac819ceef6fa5e1b7884a4a421cc3c /include/linux/perf_counter.h
parent     b09d2501ed3d294619cbfbcf828ad39324d0e548 (diff)
download   lwn-7b732a75047738e4f85438ed2f9cd34bf5f2a19a.tar.gz
           lwn-7b732a75047738e4f85438ed2f9cd34bf5f2a19a.zip
perf_counter: new output ABI - part 1
Impact: Rework the perfcounter output ABI

Use sys_read() only for instant data and provide mmap() output for all
async overflow data.

The first mmap() determines the size of the output buffer. The mmap()
size must be a PAGE_SIZE multiple of 1+pages, where pages must be a
power of 2 or 0. Further mmap()s of the same fd must have the same
size. Once all maps are gone, you can again mmap() with a new size.

In case of 0 extra pages there is no data output and the first page
only contains meta data.

When there are data pages, a poll() event will be generated for each
full page of data. Furthermore, the output is circular. This means
that although 1 page is a valid configuration, it is useless, since
we'll start overwriting it the instant we report a full page.

Future work will focus on the output format (currently maintained),
where we'll likely want each entry denoted by a header which includes
a type and length.

Further future work will allow splicing the fd, also containing the
async overflow data -- splice() would be mutually exclusive with
mmap() of the data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090323172417.470536358@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
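To make the sizing rule concrete, here is a minimal userspace sketch that is
not part of this patch: it assumes `fd` is a perf counter file descriptor
obtained elsewhere, that a read-only mapping is sufficient, and
map_counter() is a hypothetical helper name.

/*
 * Hypothetical helper illustrating the mmap() sizing rule described
 * above: 1 metadata page plus `pages` data pages, where `pages` must
 * be 0 or a power of two.  Not part of this patch.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_counter(int fd, unsigned int pages)
{
	long page_size = sysconf(_SC_PAGESIZE);
	size_t len = (size_t)(1 + pages) * page_size;
	void *base;

	/* reject sizes the kernel would refuse: pages must be 0 or 2^n */
	if (pages & (pages - 1))
		return NULL;

	/* PROT_READ is assumed sufficient; the patch does not spell this out */
	base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}

	/* first page: struct perf_counter_mmap_page; data pages follow */
	return base;
}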
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--  include/linux/perf_counter.h  36
1 file changed, 15 insertions, 21 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 40b324e91bf6..2b5e66d5ebdf 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -152,6 +152,8 @@ struct perf_counter_mmap_page {
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware counter identifier */
__s64 offset; /* add to hardware counter value */
+
+ __u32 data_head; /* head in the data section */
};
#ifdef __KERNEL__
@@ -218,21 +220,6 @@ struct hw_perf_counter {
#endif
};
-/*
- * Hardcoded buffer length limit for now, for IRQ-fed events:
- */
-#define PERF_DATA_BUFLEN 2048
-
-/**
- * struct perf_data - performance counter IRQ data sampling ...
- */
-struct perf_data {
- int len;
- int rd_idx;
- int overrun;
- u8 data[PERF_DATA_BUFLEN];
-};
-
struct perf_counter;
/**
@@ -256,6 +243,14 @@ enum perf_counter_active_state {
struct file;
+struct perf_mmap_data {
+ struct rcu_head rcu_head;
+ int nr_pages;
+ atomic_t head;
+ struct perf_counter_mmap_page *user_page;
+ void *data_pages[0];
+};
+
/**
* struct perf_counter - performance counter kernel representation:
*/
@@ -289,16 +284,15 @@ struct perf_counter {
int oncpu;
int cpu;
- /* pointer to page shared with userspace via mmap */
- unsigned long user_page;
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+ struct perf_mmap_data *data;
- /* read() / irq related data */
+ /* poll related */
wait_queue_head_t waitq;
/* optional: for NMIs */
int wakeup_pending;
- struct perf_data *irqdata;
- struct perf_data *usrdata;
- struct perf_data data[2];
void (*destroy)(struct perf_counter *);
struct rcu_head rcu_head;
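
For completeness, a hedged sketch of how a consumer might use the new
data_head field together with the poll()-per-full-page behaviour described
in the changelog. The record format and any tail write-back protocol are not
defined by this commit, so the byte-wise chunking below is an assumption.

/*
 * Sketch of a consumer loop, not part of this patch.  Assumptions:
 * data_head counts bytes written into the circular data area, struct
 * perf_counter_mmap_page is visible via <linux/perf_counter.h>, and
 * no tail write-back is required by the kernel at this point.
 */
#include <linux/perf_counter.h>
#include <poll.h>
#include <stddef.h>
#include <stdint.h>

static void consume(int fd, void *base, unsigned int pages, long page_size,
		    void (*emit)(const void *buf, size_t len))
{
	struct perf_counter_mmap_page *meta = base;
	char *data = (char *)base + page_size;	/* data starts after the meta page */
	size_t size = (size_t)pages * page_size;	/* pages must be non-zero here */
	uint32_t tail = 0;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		uint32_t head;

		/* one POLLIN per full page of new output */
		poll(&pfd, 1, -1);

		head = meta->data_head;		/* snapshot of the producer position */
		while (tail != head) {
			size_t off = tail % size;
			size_t chunk = head - tail;

			/* the buffer is circular: stop each copy at the wrap point */
			if (chunk > size - off)
				chunk = size - off;
			emit(data + off, chunk);
			tail += chunk;
		}
	}
}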