rcuref: Plug slowpath race in rcuref_put()
Kernel test robot reported an "imbalanced put" in the rcuref_put() slow
path, which turned out to be a false positive. Consider the following race:
    ref = 0 (via rcuref_init(ref, 1))

    T1                                      T2
    rcuref_put(ref)
    -> atomic_add_negative_release(-1, ref) # ref -> 0xffffffff
    -> rcuref_put_slowpath(ref)
                                            rcuref_get(ref)
                                            -> atomic_add_negative_relaxed(1, &ref->refcnt)
                                            -> return true; # ref -> 0

                                            rcuref_put(ref)
                                            -> atomic_add_negative_release(-1, ref) # ref -> 0xffffffff
                                            -> rcuref_put_slowpath()

    -> cnt = atomic_read(&ref->refcnt); # cnt -> 0xffffffff / RCUREF_NOREF
    -> atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD) # ref -> 0xe0000000 / RCUREF_DEAD
    -> return true

                                            -> cnt = atomic_read(&ref->refcnt); # cnt -> 0xe0000000 / RCUREF_DEAD
                                            -> if (cnt > RCUREF_RELEASED)       # 0xe0000000 > 0xc0000000
                                            -> WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")
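For reference when reading the trace, the counter zones come from the rcuref
scheme in lib/rcuref.c (values as of this commit):

    #define RCUREF_ONEREF     0x00000000U
    #define RCUREF_MAXREF     0x7fffffffU
    #define RCUREF_SATURATED  0xa0000000U
    #define RCUREF_RELEASED   0xc0000000U
    #define RCUREF_DEAD       0xe0000000U
    #define RCUREF_NOREF      0xffffffffU

A count of 0xe0000000 sits in the dead zone above RCUREF_RELEASED, which is
exactly what trips the "imbalanced put()" warning in T2 above.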
The problem is the additional read in the slow path (after it
decremented to RCUREF_NOREF) which can happen after the counter has been
marked RCUREF_DEAD.
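The window can be replayed deterministically. Below is a minimal C11 sketch
which hand-interleaves the two slow paths from the trace above; the flat
main() and the simplifications are illustrative, not the kernel
implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    #define RCUREF_RELEASED 0xc0000000U
    #define RCUREF_DEAD     0xe0000000U

    static _Atomic unsigned int refcnt;

    int main(void)
    {
            /* State when T1's slow path finally runs: both fastpath
             * decrements and T2's get() have hit the counter. */
            atomic_init(&refcnt, 0xffffffffU);      /* RCUREF_NOREF */

            /* T1's slow path: reads NOREF, marks the counter dead. */
            unsigned int cnt = atomic_load(&refcnt);
            atomic_compare_exchange_strong(&refcnt, &cnt, RCUREF_DEAD);

            /* T2's slow path re-reads and now observes DEAD ... */
            cnt = atomic_load(&refcnt);
            if (cnt > RCUREF_RELEASED)      /* 0xe0000000 > 0xc0000000 */
                    printf("rcuref - imbalanced put() (false positive)\n");
            return 0;
    }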
Prevent this by reusing the return value of the decrement. Now every "final"
put uses RCUREF_NOREF in the slow path and attempts the final cmpxchg() to
RCUREF_DEAD.
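Sketched with C11 atomics, the repaired flow then looks roughly as follows.
This is a simplified model with illustrative names (rcuref_put_model(),
put_slowpath()); the real slow path's saturation and dead zone handling is
elided:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RCUREF_DEAD     0xe0000000U
    #define RCUREF_NOREF    0xffffffffU

    static _Atomic unsigned int refcnt;     /* 0 == one reference held */

    /* The slow path trusts the value produced by the fastpath decrement
     * instead of re-reading the (possibly already dead) counter. */
    static bool put_slowpath(unsigned int cnt)
    {
            if (cnt == RCUREF_NOREF) {
                    /* Last reference dropped: try to mark the counter dead. */
                    if (atomic_compare_exchange_strong(&refcnt, &cnt, RCUREF_DEAD))
                            return true;
            }
            return false;
    }

    static bool rcuref_put_model(void)
    {
            /* fetch_sub() returns the old value; -1 yields the new one,
             * mirroring the kernel's atomic_sub_return_release(). */
            unsigned int cnt = atomic_fetch_sub(&refcnt, 1) - 1;

            if ((int)cnt >= 0)
                    return false;
            return put_slowpath(cnt);
    }

    int main(void)
    {
            atomic_init(&refcnt, 0);
            printf("last put: %d\n", rcuref_put_model());   /* prints 1 */
            return 0;
    }

Because cnt is handed through, only the put() whose decrement produced
RCUREF_NOREF attempts the cmpxchg() to RCUREF_DEAD; a later read cannot be
confused by a counter another CPU already marked dead.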
[ bigeasy: Add changelog ]
Fixes: ee1ee6db07 ("atomics: Provide rcuref - scalable reference counting")
Reported-by: kernel test robot <oliver.sang@intel.com>
Debugged-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: stable@vger.kernel.org
Closes: https://lore.kernel.org/oe-lkp/202412311453.9d7636a2-lkp@intel.com
---
 include/linux/rcuref.h | 9 ++++++---
 lib/rcuref.c           | 5 ++---
 2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
 	return rcuref_get_slowpath(ref);
 }
 
-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
 
 /*
  * Internal helper. Do not invoke directly.
  */
 static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
 {
+	int cnt;
+
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
 			 "suspicious rcuref_put_rcusafe() usage");
 	/*
 	 * Unconditionally decrease the reference count. The saturation and
 	 * dead zones provide enough tolerance for this.
 	 */
-	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+	cnt = atomic_sub_return_release(1, &ref->refcnt);
+	if (likely(cnt >= 0))
 		return false;
 
 	/*
 	 * Handle the last reference drop and cases inside the saturation
 	 * and dead zones.
 	 */
-	return rcuref_put_slowpath(ref);
+	return rcuref_put_slowpath(ref, cnt);
 }
 
 /**
diff --git a/lib/rcuref.c b/lib/rcuref.c
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
 /**
  * rcuref_put_slowpath - Slowpath of __rcuref_put()
  * @ref:	Pointer to the reference count
+ * @cnt:	The resulting value of the fastpath decrement
  *
  * Invoked when the reference count is outside of the valid zone.
  *
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
  * with a concurrent get()/put() pair. Caller is not allowed to
  * deconstruct the protected object.
  */
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
 {
-	unsigned int cnt = atomic_read(&ref->refcnt);
-
 	/* Did this drop the last reference? */
 	if (likely(cnt == RCUREF_NOREF)) {
 		/*