author | Yury Norov <yury.norov@gmail.com> | 2022-09-19 14:05:53 -0700 |
---|---|---|
committer | Yury Norov <yury.norov@gmail.com> | 2022-10-01 10:22:58 -0700 |
commit | 78e5a3399421ad79fc024e6d78e2deb7809d26af (patch) | |
tree | 300d0095c49837a9b1e74417dd58d75660c46730 /include/linux/cpumask.h | |
parent | 8173aa26260e6d0153db0c7135d41a4da612da5b (diff) | |
download | lwn-78e5a3399421ad79fc024e6d78e2deb7809d26af.tar.gz lwn-78e5a3399421ad79fc024e6d78e2deb7809d26af.zip | |
cpumask: fix checking valid cpu range
The range of valid CPUs is [0, nr_cpu_ids). Some cpumask functions are
passed a shifted CPU index, and for them, the valid range is
[-1, nr_cpu_ids-1). Currently for those functions, we check the index
against [-1, nr_cpu_ids), which is wrong.
Signed-off-by: Yury Norov <yury.norov@gmail.com>
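The off-by-one is easiest to see side by side. Below is a minimal userspace sketch, not the kernel code: the real cpumask_check() only warns on an out-of-range index rather than rejecting it, and nr_cpu_ids is hard-coded here purely to make the demo concrete.

```c
#include <stdbool.h>
#include <stdio.h>

/* Assumed CPU count, just for the demo. */
static const unsigned int nr_cpu_ids = 4;

/* Old check: -1 is special-cased, so every n in [-1, nr_cpu_ids) passes,
 * including n == nr_cpu_ids - 1, which has no valid "next" CPU after it. */
static bool old_check(int n)
{
	return n == -1 || (unsigned int)n < nr_cpu_ids;
}

/* New check: validate the shifted index n + 1 against [0, nr_cpu_ids),
 * which accepts exactly n in [-1, nr_cpu_ids - 1). */
static bool new_check(int n)
{
	return (unsigned int)(n + 1) < nr_cpu_ids;
}

int main(void)
{
	for (int n = -2; n <= (int)nr_cpu_ids; n++)
		printf("n=%2d  old=%d  new=%d\n", n, old_check(n), new_check(n));
	return 0;
}
```

With nr_cpu_ids = 4 the two checks disagree only at n = 3: the old code accepted it as a prior CPU, while the patched code flags it, matching the [-1, nr_cpu_ids-1) range described above.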
Diffstat (limited to 'include/linux/cpumask.h')
-rw-r--r-- | include/linux/cpumask.h | 19 |
1 file changed, 8 insertions, 11 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 13b32dd9803b..286804bfe3b7 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -174,9 +174,8 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
 static inline
 unsigned int cpumask_next(int n, const struct cpumask *srcp)
 {
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
+	/* n is a prior cpu */
+	cpumask_check(n + 1);
 	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
 }
 
@@ -189,9 +188,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
  */
 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 {
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
+	/* n is a prior cpu */
+	cpumask_check(n + 1);
 	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
 }
 
@@ -231,9 +229,8 @@ static inline
 unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
 		     const struct cpumask *src2p)
 {
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
+	/* n is a prior cpu */
+	cpumask_check(n + 1);
 	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
 		nr_cpumask_bits, n + 1);
 }
@@ -263,8 +260,8 @@ static inline
 unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
 {
 	cpumask_check(start);
-	if (n != -1)
-		cpumask_check(n);
+	/* n is a prior cpu */
+	cpumask_check(n + 1);
 
 	/*
 	 * Return the first available CPU when wrapping, or when starting before cpu0,
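For context, the "n is a prior cpu" comments added above refer to the usual calling convention: each call to cpumask_next() and friends is handed the CPU returned by the previous call, with -1 meaning "no prior CPU" before the first call, which is why -1 has to stay legal. Below is a rough kernel-context sketch of that pattern (not standalone; do_something() is a hypothetical stand-in for the caller's per-CPU work), essentially what the for_each_cpu() helpers wrap.

```c
/* Walk every CPU set in *mask. The first call passes -1, so the search
 * starts at CPU 0; each later call passes back the CPU found last time,
 * i.e. the "prior cpu" that cpumask_check(n + 1) now validates. */
int cpu;

for (cpu = cpumask_next(-1, mask);
     cpu < nr_cpu_ids;
     cpu = cpumask_next(cpu, mask))
	do_something(cpu);
```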