author    Robert Richter <robert.richter@amd.com>  2011-11-18 12:35:21 +0100
committer Ingo Molnar <mingo@elte.hu>              2011-12-06 08:33:54 +0100
commit    1e2ad28f80b4e155678259238f51edebc19e4014 (patch)
tree      694661d2db06536d134baa42d9f3985db6f95d9e /include/linux/bitops.h
parent    0f5a2601284237e2ba089389fd75d67f77626cef (diff)
perf, x86: Implement event scheduler helper functions
This patch introduces x86 perf scheduler helper functions. We need
them to later add more complex functionality to support overlapping
counter constraints (next patch).

The algorithm is modified so that the range of weight values is now
generated from the constraints. There should be no other functional
changes.

The scheduler is controlled through the helper functions. There are
functions to initialize the scheduler, traverse the event list, find
unused counters, etc. The scheduler keeps its own state.

V3:
* Added the macro for_each_set_bit_cont().
* Changed the interfaces of perf_sched_find_counter() and
  perf_sched_next_event() to return bool.
* Added some comments to make the code easier to understand.

V4:
* Fix broken event assignment if the weight of the first event is not
  wmin (perf_sched_init()).

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1321616122-1533-2-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
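The scheduler helpers themselves land in arch/x86/kernel/cpu/perf_event.c;
this commit only adds the bitops macro they rely on. As a rough, hedged
illustration of how the new macro supports "the scheduler keeps its own
state", consider the sketch below. The types and names (sched_state,
event_constraint, idxmsk, used, NUM_COUNTERS) are assumptions made for
illustration, not the exact code added by this series.

/*
 * Hedged sketch only: illustrative types and names, not the exact
 * implementation from this patch series.
 */
#include <linux/bitops.h>
#include <linux/types.h>

#define NUM_COUNTERS 64				/* assumed counter count */

struct event_constraint {
	/* counters this event is allowed to use */
	unsigned long idxmsk[BITS_TO_LONGS(NUM_COUNTERS)];
};

struct sched_state {
	int counter;				/* where the last scan stopped */
	/* counters already handed out during this scheduling pass */
	unsigned long used[BITS_TO_LONGS(NUM_COUNTERS)];
};

static bool sched_find_counter(struct sched_state *state,
			       struct event_constraint *c)
{
	int idx = state->counter;

	/*
	 * for_each_set_bit_cont() starts at idx itself (not idx + 1),
	 * so the search resumes exactly where the previous one ended.
	 */
	for_each_set_bit_cont(idx, c->idxmsk, NUM_COUNTERS) {
		if (!__test_and_set_bit(idx, state->used))
			break;			/* free counter found */
	}

	if (idx >= NUM_COUNTERS)
		return false;			/* no allowed counter is free */

	state->counter = idx;			/* state survives for the next call */
	return true;
}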
Diffstat (limited to 'include/linux/bitops.h')
-rw-r--r--  include/linux/bitops.h | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
 #include <asm/bitops.h>
 
 #define for_each_set_bit(bit, addr, size) \
-	for ((bit) = find_first_bit((addr), (size)); \
-	     (bit) < (size); \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 static __inline__ int get_bitmask_order(unsigned int count)
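To see the difference between the two macros, here is a hedged,
self-contained userspace analogue. The single-word find_first_bit()
and find_next_bit() below are simplified stand-ins for the kernel's
multi-word bitmap helpers, written only so the example compiles on
its own.

#include <stdio.h>

/* Simplified single-word stand-in for the kernel helper. */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	while (offset < size && !(*addr & (1UL << offset)))
		offset++;
	return offset;
}

static unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size)
{
	return find_next_bit(addr, size, 0);
}

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_cont(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long mask = 0xb4;	/* bits 2, 4, 5 and 7 set */
	unsigned long bit;

	for_each_set_bit(bit, &mask, 8)
		printf("%lu ", bit);	/* prints: 2 4 5 7 */
	printf("\n");

	bit = 4;			/* resume from bit 4, inclusive */
	for_each_set_bit_cont(bit, &mask, 8)
		printf("%lu ", bit);	/* prints: 4 5 7 */
	printf("\n");

	return 0;
}

The key design point is visible in the initializer: for_each_set_bit_cont()
calls find_next_bit() with offset (bit) rather than (bit) + 1, so iteration
starts at the current value of bit itself. A caller can therefore resume a
scan where it left off, including re-examining the bit it stopped on.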