cpumask: fix checking valid cpu range
The range of valid CPUs is [0, nr_cpu_ids). Some cpumask functions are passed a shifted CPU index, and for them the valid range is [-1, nr_cpu_ids-1). Currently those functions check the index against [-1, nr_cpu_ids), which is wrong.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
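For illustration only, here is a minimal userspace sketch of the off-by-one described above. It is not code from the patch: nr_cpu_ids is hard-coded to 4 and cpumask_check() is reduced to a printf(), both stand-ins for the kernel symbols, so the difference between checking n and checking n + 1 is visible.

/*
 * Illustrative sketch, not kernel code: nr_cpu_ids and cpumask_check()
 * are simplified stand-ins.
 */
#include <stdio.h>

#define nr_cpu_ids 4            /* pretend the system has CPUs 0..3 */

static void cpumask_check(int cpu)
{
        if (cpu < 0 || cpu >= nr_cpu_ids)
                printf("bad cpu index %d, valid range is [0, %d)\n",
                       cpu, nr_cpu_ids);
}

int main(void)
{
        /* n is a "prior cpu": the search starts at n + 1 */
        int n = nr_cpu_ids - 1;

        /*
         * Old check: validates n itself, so n = nr_cpu_ids - 1 slips
         * through even though the search start n + 1 == nr_cpu_ids is
         * outside [0, nr_cpu_ids).
         */
        if (n != -1)
                cpumask_check(n);       /* stays silent */

        /*
         * New check: validates the shifted index, restricting n to
         * [-1, nr_cpu_ids - 1) as the commit message describes.
         */
        cpumask_check(n + 1);           /* warns */

        return 0;
}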
parent 8173aa2626
commit 78e5a33994
1 changed file with 8 additions and 11 deletions
@@ -174,9 +174,8 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
 static inline
 unsigned int cpumask_next(int n, const struct cpumask *srcp)
 {
-        /* -1 is a legal arg here. */
-        if (n != -1)
-                cpumask_check(n);
+        /* n is a prior cpu */
+        cpumask_check(n + 1);
         return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
 }
 
@@ -189,9 +188,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
  */
 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 {
-        /* -1 is a legal arg here. */
-        if (n != -1)
-                cpumask_check(n);
+        /* n is a prior cpu */
+        cpumask_check(n + 1);
         return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
 }
 
@@ -231,9 +229,8 @@ static inline
 unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
                               const struct cpumask *src2p)
 {
-        /* -1 is a legal arg here. */
-        if (n != -1)
-                cpumask_check(n);
+        /* n is a prior cpu */
+        cpumask_check(n + 1);
         return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
                 nr_cpumask_bits, n + 1);
 }
@@ -263,8 +260,8 @@ static inline
 unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
 {
         cpumask_check(start);
-        if (n != -1)
-                cpumask_check(n);
+        /* n is a prior cpu */
+        cpumask_check(n + 1);
 
         /*
          * Return the first available CPU when wrapping, or when starting before cpu0,