
xfs: remove xfs_buf_cache.bc_lock

xfs_buf_cache.bc_lock serializes adding buffers to and removing them from
the hashtable.  But as the rhashtable code already uses fine-grained
internal locking for inserts and removals, the extra protection isn't
actually required.

It also happens to fix a lock order inversion vs b_lock added by the
recent lookup race fix.

Fixes: ee10f6fcdb ("xfs: fix buffer lookup vs release race")
Reported-by: Lai, Yi <yi1.lai@linux.intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
Christoph Hellwig 2025-01-28 06:22:58 +01:00 committed by Carlos Maiolino
parent 26b63bee2f
commit a9ab28b3d2
2 changed files with 17 additions and 15 deletions
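
As background for the diffs below, here is a minimal sketch of the lookup-or-insert pattern the code switches to. It is illustration only, not code from this patch: demo_obj, demo_params and demo_insert_or_lookup are made-up names. rhashtable_lookup_get_insert_fast() takes the table's per-bucket locks internally, so the caller needs no external lock; rcu_read_lock() is only there to keep a returned pre-existing object alive until a reference can be taken on it, which is the role xfs_buf_try_hold() plays in xfs_buf_find_insert().

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Made-up demo object: only the rhash_head and the refcount matter here. */
struct demo_obj {
        u32                     key;
        refcount_t              refcount;
        struct rhash_head       node;
};

static const struct rhashtable_params demo_params = {
        .key_len        = sizeof(u32),
        .key_offset     = offsetof(struct demo_obj, key),
        .head_offset    = offsetof(struct demo_obj, node),
};

/*
 * Insert @new unless an object with the same key is already hashed.
 * No external lock is taken: the rhashtable serializes the insert
 * itself, and rcu_read_lock() only keeps a returned winner alive
 * until we manage to pin it with a reference.
 */
static int demo_insert_or_lookup(struct rhashtable *ht,
                struct demo_obj *new, struct demo_obj **existing)
{
        struct demo_obj *old;

        *existing = NULL;

        rcu_read_lock();
        old = rhashtable_lookup_get_insert_fast(ht, &new->node, demo_params);
        if (IS_ERR(old)) {
                rcu_read_unlock();
                return PTR_ERR(old);    /* e.g. -ENOMEM */
        }
        if (old && refcount_inc_not_zero(&old->refcount)) {
                /* lost the race: hand the pinned winner to the caller */
                rcu_read_unlock();
                *existing = old;
                return -EEXIST;
        }
        rcu_read_unlock();
        if (old)
                return -EAGAIN;         /* winner is being torn down, retry */
        return 0;                       /* @new is now in the table */
}

The hunks in xfs_buf_find_insert() below have the same shape: the hash insert runs under rcu_read_lock() alone, an existing buffer is pinned with xfs_buf_try_hold() before the RCU section ends, and bc_lock disappears.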

fs/xfs/xfs_buf.c

@@ -41,8 +41,7 @@ struct kmem_cache *xfs_buf_cache;
  *
  * xfs_buf_rele:
  *      b_lock
- *        pag_buf_lock
- *          lru_lock
+ *        lru_lock
  *
  * xfs_buftarg_drain_rele
  *      lru_lock
@@ -220,14 +219,21 @@ _xfs_buf_alloc(
          */
         flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 
-        spin_lock_init(&bp->b_lock);
+        /*
+         * A new buffer is held and locked by the owner. This ensures that the
+         * buffer is owned by the caller and racing RCU lookups right after
+         * inserting into the hash table are safe (and will have to wait for
+         * the unlock to do anything non-trivial).
+         */
         bp->b_hold = 1;
+        sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+        spin_lock_init(&bp->b_lock);
         atomic_set(&bp->b_lru_ref, 1);
         init_completion(&bp->b_iowait);
         INIT_LIST_HEAD(&bp->b_lru);
         INIT_LIST_HEAD(&bp->b_list);
         INIT_LIST_HEAD(&bp->b_li_list);
-        sema_init(&bp->b_sema, 0); /* held, no waiters */
         bp->b_target = target;
         bp->b_mount = target->bt_mount;
         bp->b_flags = flags;
@@ -497,7 +503,6 @@ int
 xfs_buf_cache_init(
         struct xfs_buf_cache    *bch)
 {
-        spin_lock_init(&bch->bc_lock);
         return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
 }
 
@@ -647,17 +652,20 @@ xfs_buf_find_insert(
         if (error)
                 goto out_free_buf;
 
-        spin_lock(&bch->bc_lock);
+        /* The new buffer keeps the perag reference until it is freed. */
+        new_bp->b_pag = pag;
+
+        rcu_read_lock();
         bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
                         &new_bp->b_rhash_head, xfs_buf_hash_params);
         if (IS_ERR(bp)) {
+                rcu_read_unlock();
                 error = PTR_ERR(bp);
-                spin_unlock(&bch->bc_lock);
                 goto out_free_buf;
         }
         if (bp && xfs_buf_try_hold(bp)) {
                 /* found an existing buffer */
-                spin_unlock(&bch->bc_lock);
+                rcu_read_unlock();
                 error = xfs_buf_find_lock(bp, flags);
                 if (error)
                         xfs_buf_rele(bp);
@@ -665,10 +673,8 @@ xfs_buf_find_insert(
                 *bpp = bp;
                 goto out_free_buf;
         }
+        rcu_read_unlock();
 
-        /* The new buffer keeps the perag reference until it is freed. */
-        new_bp->b_pag = pag;
-        spin_unlock(&bch->bc_lock);
         *bpp = new_bp;
         return 0;
 
@@ -1085,7 +1091,6 @@ xfs_buf_rele_cached(
         }
 
         /* we are asked to drop the last reference */
-        spin_lock(&bch->bc_lock);
         __xfs_buf_ioacct_dec(bp);
         if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                 /*
@@ -1097,7 +1102,6 @@
                         bp->b_state &= ~XFS_BSTATE_DISPOSE;
                 else
                         bp->b_hold--;
-                spin_unlock(&bch->bc_lock);
         } else {
                 bp->b_hold--;
                 /*
@@ -1115,7 +1119,6 @@
                 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
                 rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
                                 xfs_buf_hash_params);
-                spin_unlock(&bch->bc_lock);
                 if (pag)
                         xfs_perag_put(pag);
                 freebuf = true;

fs/xfs/xfs_buf.h

@@ -80,7 +80,6 @@ typedef unsigned int xfs_buf_flags_t;
 #define XFS_BSTATE_IN_FLIGHT     (1 << 1)      /* I/O in flight */
 
 struct xfs_buf_cache {
-        spinlock_t              bc_lock;
         struct rhashtable       bc_hash;
 };
 