summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-14 08:55:18 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-14 08:55:18 -0700
commita970ed18812d0cf5e1f54401403300bb35b36433 (patch)
treeaabcf39565be81d880127b71a32012314a6684ea /include/linux
parent5181afcdf99527dd92a88f80fc4d0d8013e1b510 (diff)
parent592a22338e5acfcd10983699cae8ea02ecd42935 (diff)
downloadlwn-a970ed18812d0cf5e1f54401403300bb35b36433.tar.gz
lwn-a970ed18812d0cf5e1f54401403300bb35b36433.zip
Merge tag 'bitmap-for-v7.1' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov: - new API: bitmap_weight_from() and bitmap_weighted_xor() (Yury) - drop unused __find_nth_andnot_bit() (Yury) - new tests and test improvements (Andy, Akinobu, Yury) - fixes for count_zeroes API (Yury) - cleanup bitmap_print_to_pagebuf() mess (Yury) - documentation updates (Andy, Kai, Kit). * tag 'bitmap-for-v7.1' of https://github.com/norov/linux: (24 commits) bitops: Update kernel-doc for sign_extendXX() powerpc/xive: simplify xive_spapr_debug_show() thermal: intel: switch cpumask_get() to using cpumask_print_to_pagebuf() coresight: don't use bitmap_print_to_pagebuf() lib/prime_numbers: drop temporary buffer in dump_primes() drm/xe: switch xe_pagefault_queue_init() to using bitmap_weighted_or() ice: use bitmap_empty() in ice_vf_has_no_qs_ena ice: use bitmap_weighted_xor() in ice_find_free_recp_res_idx() bitmap: introduce bitmap_weighted_xor() bitmap: add test_zero_nbits() bitmap: exclude nbits == 0 cases from bitmap test bitmap: test bitmap_weight() for more asm-generic/bitops: Fix a comment typo in instrumented-atomic.h bitops: fix kernel-doc parameter name for parity8() lib: count_zeros: unify count_{leading,trailing}_zeros() lib: count_zeros: fix 32/64-bit inconsistency in count_trailing_zeros() lib: crypto: fix comments for count_leading_zeros() x86/topology: use bitmap_weight_from() bitmap: add bitmap_weight_from() lib/find_bit_benchmark: avoid clearing randomly filled bitmap in test_find_first_bit() ...
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bitmap.h48
-rw-r--r--include/linux/bitops.h12
-rw-r--r--include/linux/count_zeros.h13
-rw-r--r--include/linux/find.h2
4 files changed, 60 insertions, 15 deletions
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index b0395e4ccf90..b007d54a9036 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -46,6 +46,7 @@ struct device;
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
* bitmap_weighted_or(dst, src1, src2, nbits) *dst = *src1 | *src2. Returns Hamming Weight of dst
+ * bitmap_weighted_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2. Returns Hamming Weight of dst
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
@@ -57,6 +58,7 @@ struct device;
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
* bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
+ * bitmap_weight_from(src, start, end) Hamming Weight starting from @start
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
@@ -168,6 +170,8 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weighted_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
@@ -353,6 +357,18 @@ unsigned int bitmap_weighted_or(unsigned long *dst, const unsigned long *src1,
}
static __always_inline
+unsigned int bitmap_weighted_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits)) {
+ *dst = *src1 ^ *src2;
+ return hweight_long(*dst & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return __bitmap_weighted_xor(dst, src1, src2, nbits);
+ }
+}
+
+static __always_inline
void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
@@ -479,6 +495,38 @@ unsigned long bitmap_weight_andnot(const unsigned long *src1,
return __bitmap_weight_andnot(src1, src2, nbits);
}
+/**
+ * bitmap_weight_from - Hamming weight for a memory region
+ * @bitmap: The base address
+ * @start: The bit number at which to start weighting
+ * @end: The bitmap size in bits
+ *
+ * Returns the number of set bits in the region. If @start >= @end,
+ * @end is returned.
+ */
+static __always_inline
+unsigned long bitmap_weight_from(const unsigned long *bitmap,
+ unsigned int start, unsigned int end)
+{
+ unsigned long w;
+
+ if (unlikely(start >= end))
+ return end;
+
+ if (small_const_nbits(end))
+ return hweight_long(*bitmap & GENMASK(end - 1, start));
+
+ bitmap += start / BITS_PER_LONG;
+ /* Opencode round_down() to not include math.h */
+ end -= start & ~(BITS_PER_LONG - 1);
+ start %= BITS_PER_LONG;
+ w = bitmap_weight(bitmap, end);
+ if (start)
+ w -= hweight_long(*bitmap & BITMAP_LAST_WORD_MASK(start));
+
+ return w;
+}
+
static __always_inline
void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index ea7898cc5903..657eab2725ce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -179,9 +179,11 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
/**
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
- * @index: 0 based bit index (0<=index<32) to sign bit
+ * @index: 0 based bit index (0 <= index < 32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
+ *
+ * Return: 32-bit sign extended value
*/
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
@@ -192,7 +194,11 @@ static __always_inline __s32 sign_extend32(__u32 value, int index)
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
- * @index: 0 based bit index (0<=index<64) to sign bit
+ * @index: 0 based bit index (0 <= index < 64) to sign bit
+ *
+ * This is safe to use for 32-, 16- and 8-bit types as well.
+ *
+ * Return: 64-bit sign extended value
*/
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
@@ -230,7 +236,7 @@ static inline int get_count_order_long(unsigned long l)
/**
* parity8 - get the parity of an u8 value
- * @value: the value to be examined
+ * @val: the value to be examined
*
* Determine the parity of the u8 argument.
*
diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h
index 5b8ff5ac660d..b72ba3faa036 100644
--- a/include/linux/count_zeros.h
+++ b/include/linux/count_zeros.h
@@ -18,7 +18,7 @@
*
* If the MSB of @x is set, the result is 0.
* If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
- * If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
+ * If @x is 0 then the result is BITS_PER_LONG.
*/
static inline int count_leading_zeros(unsigned long x)
{
@@ -28,8 +28,6 @@ static inline int count_leading_zeros(unsigned long x)
return BITS_PER_LONG - fls64(x);
}
-#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG
-
/**
* count_trailing_zeros - Count the number of zeros from the LSB forwards
* @x: The value
@@ -38,16 +36,11 @@ static inline int count_leading_zeros(unsigned long x)
*
* If the LSB of @x is set, the result is 0.
* If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
- * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
+ * If @x is 0 then the result is BITS_PER_LONG.
*/
static inline int count_trailing_zeros(unsigned long x)
{
-#define COUNT_TRAILING_ZEROS_0 (-1)
-
- if (sizeof(x) == 4)
- return ffs(x);
- else
- return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
+ return x ? __ffs(x) : BITS_PER_LONG;
}
#endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */
diff --git a/include/linux/find.h b/include/linux/find.h
index 9d720ad92bc1..6c2be8ca615d 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -22,8 +22,6 @@ extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long si
unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n);
-unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
- unsigned long size, unsigned long n);
unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size,
unsigned long n);