Signed-off-by: Carlos Maiolino <cem@kernel.org>

-----BEGIN PGP SIGNATURE-----
 
 iJUEABMJAB0WIQSmtYVZ/MfVMGUq1GNcsMJ8RxYuYwUCZ8VyFAAKCRBcsMJ8RxYu
 Y4VkAYDeFyUEQUb38Z2w1nq4CPiSpgT48Wa5/ES3P2lrLz/ULtt9LB6lteBzpG4S
 La9ktFsBgPzW7pBdLyLkEFsHxlI+LePJ3E6tmP7B6xuFaPArnoSy1wATSNH0/dM9
 lBGZ33Xcng==
 =2fdY
 -----END PGP SIGNATURE-----

Merge tag 'xfs-fixes-6.14-rc6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs cleanups from Carlos Maiolino:
 "Just a few cleanups"

* tag 'xfs-fixes-6.14-rc6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: remove the XBF_STALE check from xfs_buf_rele_cached
  xfs: remove most in-flight buffer accounting
  xfs: decouple buffer readahead from the normal buffer read path
  xfs: reduce context switches for synchronous buffered I/O
commit d9a9c94dbc
Author: Linus Torvalds
Date:   2025-03-03 07:38:02 -10:00

7 changed files with 71 additions and 132 deletions
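The thread running through the four patches: readahead is the only buffer I/O that completes with no caller still holding the buffer, so it gets its own submission path, the buftarg in-flight counter (bt_io_count, renamed bt_readahead_count) only has to track readahead, and synchronous I/O now completes in the submitter's context. At the call sites the split looks roughly like this — a hedged sketch, not taken from the diff, assuming the public wrappers in fs/xfs/xfs_buf.h keep their current signatures; mp, nextbno, numblks, bp and error are placeholder variables, and xfs_bnobt_buf_ops is just an arbitrary example ops vector:

	/*
	 * Fire and forget: no handle and no error are returned; the
	 * buffer releases itself from the I/O completion path.
	 */
	xfs_buf_readahead(mp->m_ddev_targp, nextbno, numblks,
			&xfs_bnobt_buf_ops);

	/*
	 * Blocking read: either finds the XBF_DONE buffer the readahead
	 * populated or issues the I/O itself and waits.
	 */
	error = xfs_buf_read(mp->m_ddev_targp, nextbno, numblks, 0, &bp,
			&xfs_bnobt_buf_ops);
	if (!error)
		xfs_buf_relse(bp);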

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c

@@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache;
 /*
  * Locking orders
  *
- * xfs_buf_ioacct_inc:
- * xfs_buf_ioacct_dec:
- *	b_sema (caller holds)
- *	  b_lock
- *
  * xfs_buf_stale:
  *	b_sema (caller holds)
  *	  b_lock
@@ -81,51 +76,6 @@ xfs_buf_vmap_len(
 	return (bp->b_page_count * PAGE_SIZE);
 }
 
-/*
- * Bump the I/O in flight count on the buftarg if we haven't yet done so for
- * this buffer. The count is incremented once per buffer (per hold cycle)
- * because the corresponding decrement is deferred to buffer release. Buffers
- * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
- * tracking adds unnecessary overhead. This is used for synchronization
- * purposes with unmount (see xfs_buftarg_drain()), so all we really need is a
- * count of in-flight buffers.
- *
- * Buffers that are never released (e.g., superblock, iclog buffers) must set
- * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
- * never reaches zero and unmount hangs indefinitely.
- */
-static inline void
-xfs_buf_ioacct_inc(
-	struct xfs_buf	*bp)
-{
-	if (bp->b_flags & XBF_NO_IOACCT)
-		return;
-
-	ASSERT(bp->b_flags & XBF_ASYNC);
-	spin_lock(&bp->b_lock);
-	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
-		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
-		percpu_counter_inc(&bp->b_target->bt_io_count);
-	}
-	spin_unlock(&bp->b_lock);
-}
-
-/*
- * Clear the in-flight state on a buffer about to be released to the LRU or
- * freed and unaccount from the buftarg.
- */
-static inline void
-__xfs_buf_ioacct_dec(
-	struct xfs_buf	*bp)
-{
-	lockdep_assert_held(&bp->b_lock);
-
-	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
-		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
-		percpu_counter_dec(&bp->b_target->bt_io_count);
-	}
-}
-
 /*
  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  * b_lru_ref count so that the buffer is freed immediately when the buffer
@@ -149,15 +99,7 @@ xfs_buf_stale(
 	 */
 	bp->b_flags &= ~_XBF_DELWRI_Q;
 
-	/*
-	 * Once the buffer is marked stale and unlocked, a subsequent lookup
-	 * could reset b_flags. There is no guarantee that the buffer is
-	 * unaccounted (released to LRU) before that occurs. Drop in-flight
-	 * status now to preserve accounting consistency.
-	 */
 	spin_lock(&bp->b_lock);
-	__xfs_buf_ioacct_dec(bp);
-
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -794,18 +736,13 @@ out_put_perag:
 int
 _xfs_buf_read(
-	struct xfs_buf	*bp,
-	xfs_buf_flags_t	flags)
+	struct xfs_buf	*bp)
 {
-	ASSERT(!(flags & XBF_WRITE));
 	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 
 	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
-	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
+	bp->b_flags |= XBF_READ;
 
 	xfs_buf_submit(bp);
-	if (flags & XBF_ASYNC)
-		return 0;
 	return xfs_buf_iowait(bp);
 }
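With the XBF_ASYNC leg gone, _xfs_buf_read() is strictly synchronous: set XBF_READ, submit, wait. Its lone external caller, xlog_do_recover() (updated in the xfs_log_recover.c hunk further down), follows the pattern in this condensed recap of the diff — not new code; the caller must hold the buffer locked and referenced across the call:

	xfs_buf_lock(bp);		/* caller serializes access */
	xfs_buf_hold(bp);		/* keep the buffer across the I/O */
	error = _xfs_buf_read(bp);	/* submits, waits, returns b_error */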
@@ -857,6 +794,8 @@ xfs_buf_read_map(
 	struct xfs_buf	*bp;
 	int		error;
 
+	ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD)));
+
 	flags |= XBF_READ;
 	*bpp = NULL;
@@ -870,21 +809,11 @@
 		/* Initiate the buffer read and wait. */
 		XFS_STATS_INC(target->bt_mount, xb_get_read);
 		bp->b_ops = ops;
-		error = _xfs_buf_read(bp, flags);
-
-		/* Readahead iodone already dropped the buffer, so exit. */
-		if (flags & XBF_ASYNC)
-			return 0;
+		error = _xfs_buf_read(bp);
 	} else {
 		/* Buffer already read; all we need to do is check it. */
 		error = xfs_buf_reverify(bp, ops);
 
-		/* Readahead already finished; drop the buffer and exit. */
-		if (flags & XBF_ASYNC) {
-			xfs_buf_relse(bp);
-			return 0;
-		}
-
 		/* We do not want read in the flags */
 		bp->b_flags &= ~XBF_READ;
 		ASSERT(bp->b_ops != NULL || ops == NULL);
@@ -936,6 +865,7 @@ xfs_buf_readahead_map(
 	int			nmaps,
 	const struct xfs_buf_ops *ops)
 {
+	const xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;
 	struct xfs_buf		*bp;
 
 	/*
@@ -945,9 +875,21 @@
 	if (xfs_buftarg_is_mem(target))
 		return;
 
-	xfs_buf_read_map(target, map, nmaps,
-		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
-		     __this_address);
+	if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp))
+		return;
+	trace_xfs_buf_readahead(bp, 0, _RET_IP_);
+
+	if (bp->b_flags & XBF_DONE) {
+		xfs_buf_reverify(bp, ops);
+		xfs_buf_relse(bp);
+		return;
+	}
+
+	XFS_STATS_INC(target->bt_mount, xb_get_read);
+	bp->b_ops = ops;
+	bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
+	bp->b_flags |= flags;
+	percpu_counter_inc(&target->bt_readahead_count);
+	xfs_buf_submit(bp);
 }
 
 /*
@@ -1003,10 +945,12 @@ xfs_buf_get_uncached(
 	struct xfs_buf		*bp;
 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
+	/* there are currently no valid flags for xfs_buf_get_uncached */
+	ASSERT(flags == 0);
+
 	*bpp = NULL;
 
-	/* flags might contain irrelevant bits, pass only what we care about */
-	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+	error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
 	if (error)
 		return error;
@@ -1060,7 +1004,6 @@ xfs_buf_rele_uncached(
 		spin_unlock(&bp->b_lock);
 		return;
 	}
-	__xfs_buf_ioacct_dec(bp);
 	spin_unlock(&bp->b_lock);
 	xfs_buf_free(bp);
 }
@@ -1079,20 +1022,12 @@ xfs_buf_rele_cached(
 	spin_lock(&bp->b_lock);
 	ASSERT(bp->b_hold >= 1);
 	if (bp->b_hold > 1) {
-		/*
-		 * Drop the in-flight state if the buffer is already on the LRU
-		 * and it holds the only reference. This is racy because we
-		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
-		 * ensures the decrement occurs only once per-buf.
-		 */
-		if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
-			__xfs_buf_ioacct_dec(bp);
+		bp->b_hold--;
 		goto out_unlock;
 	}
 
 	/* we are asked to drop the last reference */
-	__xfs_buf_ioacct_dec(bp);
-	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+	if (atomic_read(&bp->b_lru_ref)) {
 		/*
 		 * If the buffer is added to the LRU, keep the reference to the
 		 * buffer for the LRU and clear the (now stale) dispose list
@@ -1345,6 +1280,7 @@ xfs_buf_ioend_handle_error(
 resubmit:
 	xfs_buf_ioerror(bp, 0);
 	bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
+	reinit_completion(&bp->b_iowait);
 	xfs_buf_submit(bp);
 	return true;
 out_stale:
@@ -1355,8 +1291,9 @@ out_stale:
 	return false;
 }
 
-static void
-xfs_buf_ioend(
+/* returns false if the caller needs to resubmit the I/O, else true */
+static bool
+__xfs_buf_ioend(
 	struct xfs_buf	*bp)
 {
 	trace_xfs_buf_iodone(bp, _RET_IP_);
@@ -1369,6 +1306,8 @@ xfs_buf_ioend(
 			bp->b_ops->verify_read(bp);
 		if (!bp->b_error)
 			bp->b_flags |= XBF_DONE;
+		if (bp->b_flags & XBF_READ_AHEAD)
+			percpu_counter_dec(&bp->b_target->bt_readahead_count);
 	} else {
 		if (!bp->b_error) {
 			bp->b_flags &= ~XBF_WRITE_FAIL;
@@ -1376,7 +1315,7 @@
 	}
 
 	if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
-		return;
+		return false;
 
 	/* clear the retry state */
 	bp->b_last_error = 0;
@@ -1397,7 +1336,15 @@
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
 			 _XBF_LOGRECOVERY);
+	return true;
+}
+
+static void
+xfs_buf_ioend(
+	struct xfs_buf	*bp)
+{
+	if (!__xfs_buf_ioend(bp))
+		return;
 
 	if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
 	else
@@ -1411,15 +1358,8 @@ xfs_buf_ioend_work(
 	struct xfs_buf		*bp =
 		container_of(work, struct xfs_buf, b_ioend_work);
 
-	xfs_buf_ioend(bp);
-}
-
-static void
-xfs_buf_ioend_async(
-	struct xfs_buf	*bp)
-{
-	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
-	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+	if (__xfs_buf_ioend(bp))
+		xfs_buf_relse(bp);
 }
 
 void
@@ -1491,7 +1431,13 @@ xfs_buf_bio_end_io(
 	    XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
 		xfs_buf_ioerror(bp, -EIO);
 
-	xfs_buf_ioend_async(bp);
+	if (bp->b_flags & XBF_ASYNC) {
+		INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+		queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+	} else {
+		complete(&bp->b_iowait);
+	}
+
 	bio_put(bio);
 }
@@ -1568,9 +1514,11 @@ xfs_buf_iowait(
 {
 	ASSERT(!(bp->b_flags & XBF_ASYNC));
 
-	trace_xfs_buf_iowait(bp, _RET_IP_);
-	wait_for_completion(&bp->b_iowait);
-	trace_xfs_buf_iowait_done(bp, _RET_IP_);
+	do {
+		trace_xfs_buf_iowait(bp, _RET_IP_);
+		wait_for_completion(&bp->b_iowait);
+		trace_xfs_buf_iowait_done(bp, _RET_IP_);
+	} while (!__xfs_buf_ioend(bp));
 
 	return bp->b_error;
 }
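Together with the xfs_buf_bio_end_io() hunk above, this is the "reduce context switches" patch: synchronous completion work now runs in the waiting thread instead of bouncing through the buffer workqueue. The handoff, condensed from the hunks in this file (a recap, not new code):

	/* interrupt context: xfs_buf_bio_end_io() */
	complete(&bp->b_iowait);	/* just wake the waiter */

	/* waiter context: xfs_buf_iowait() */
	do {
		wait_for_completion(&bp->b_iowait);
	} while (!__xfs_buf_ioend(bp));

A false return from __xfs_buf_ioend() means xfs_buf_ioend_handle_error() re-armed b_iowait via reinit_completion() and resubmitted a failed write, so the waiter simply sleeps again until the retry completes.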
@@ -1648,9 +1596,6 @@ xfs_buf_submit(
 	 */
 	bp->b_error = 0;
 
-	if (bp->b_flags & XBF_ASYNC)
-		xfs_buf_ioacct_inc(bp);
-
 	if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
 		xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
 		xfs_buf_ioend(bp);
@@ -1776,9 +1721,8 @@ xfs_buftarg_wait(
 	struct xfs_buftarg	*btp)
{
 	/*
-	 * First wait on the buftarg I/O count for all in-flight buffers to be
-	 * released. This is critical as new buffers do not make the LRU until
-	 * they are released.
+	 * First wait for all in-flight readahead buffers to be released. This is
+	 * critical as new buffers do not make the LRU until they are released.
 	 *
 	 * Next, flush the buffer workqueue to ensure all completion processing
 	 * has finished. Just waiting on buffer locks is not sufficient for
@@ -1787,7 +1731,7 @@
 	 * all reference counts have been dropped before we start walking the
 	 * LRU list.
 	 */
-	while (percpu_counter_sum(&btp->bt_io_count))
+	while (percpu_counter_sum(&btp->bt_readahead_count))
 		delay(100);
 	flush_workqueue(btp->bt_mount->m_buf_workqueue);
 }
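The drain is exact because percpu_counter_sum() folds every CPU's deltas, and each increment taken at readahead submission is paired with exactly one decrement at I/O completion. The pairing, pulled together from the xfs_buf.c hunks above (a recap, not new code):

	/* submission: xfs_buf_readahead_map() */
	percpu_counter_inc(&target->bt_readahead_count);
	xfs_buf_submit(bp);

	/* completion: __xfs_buf_ioend(), read branch */
	if (bp->b_flags & XBF_READ_AHEAD)
		percpu_counter_dec(&bp->b_target->bt_readahead_count);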
@@ -1904,8 +1848,8 @@ xfs_destroy_buftarg(
 	struct xfs_buftarg	*btp)
 {
 	shrinker_free(btp->bt_shrinker);
-	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
-	percpu_counter_destroy(&btp->bt_io_count);
+	ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+	percpu_counter_destroy(&btp->bt_readahead_count);
 	list_lru_destroy(&btp->bt_lru);
 }
@@ -1959,7 +1903,7 @@ xfs_init_buftarg(
 	if (list_lru_init(&btp->bt_lru))
 		return -ENOMEM;
 
-	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+	if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
 		goto out_destroy_lru;
 
 	btp->bt_shrinker =
@@ -1973,7 +1917,7 @@
 	return 0;
 
 out_destroy_io_count:
-	percpu_counter_destroy(&btp->bt_io_count);
+	percpu_counter_destroy(&btp->bt_readahead_count);
 out_destroy_lru:
 	list_lru_destroy(&btp->bt_lru);
 	return -ENOMEM;

diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h

@@ -27,7 +27,6 @@ struct xfs_buf;
 #define XBF_READ	(1u << 0) /* buffer intended for reading from device */
 #define XBF_WRITE	(1u << 1) /* buffer intended for writing to device */
 #define XBF_READ_AHEAD	(1u << 2) /* asynchronous read-ahead */
-#define XBF_NO_IOACCT	(1u << 3) /* bypass I/O accounting (non-LRU bufs) */
 #define XBF_ASYNC	(1u << 4) /* initiator will not wait for completion */
 #define XBF_DONE	(1u << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE	(1u << 6) /* buffer has been staled, do not find it */
@@ -58,7 +57,6 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_READ,		"READ" }, \
 	{ XBF_WRITE,		"WRITE" }, \
 	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
-	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
 	{ XBF_ASYNC,		"ASYNC" }, \
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
@@ -77,7 +75,6 @@ typedef unsigned int xfs_buf_flags_t;
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE	(1 << 0)	/* buffer being discarded */
-#define XFS_BSTATE_IN_FLIGHT	(1 << 1)	/* I/O in flight */
 
 struct xfs_buf_cache {
 	struct rhashtable	bc_hash;
@@ -116,7 +113,7 @@ struct xfs_buftarg {
 	struct shrinker		*bt_shrinker;
 	struct list_lru		bt_lru;
 
-	struct percpu_counter	bt_io_count;
+	struct percpu_counter	bt_readahead_count;
 	struct ratelimit_state	bt_ioerror_rl;
 
 	/* Atomic write unit values */
@@ -291,7 +288,7 @@ int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
 int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
 		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
 		const struct xfs_buf_ops *ops);
-int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
+int _xfs_buf_read(struct xfs_buf *bp);
 void xfs_buf_hold(struct xfs_buf *bp);
 
 /* Releasing Buffers */

diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c

@@ -117,7 +117,7 @@ xmbuf_free(
 	struct xfs_buftarg	*btp)
 {
 	ASSERT(xfs_buftarg_is_mem(btp));
-	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
+	ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
 
 	trace_xmbuf_free(btp);

diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c

@@ -3380,7 +3380,7 @@ xlog_do_recover(
 	 */
 	xfs_buf_lock(bp);
 	xfs_buf_hold(bp);
-	error = _xfs_buf_read(bp, XBF_READ);
+	error = _xfs_buf_read(bp);
 	if (error) {
 		if (!xlog_is_shutdown(log)) {
 			xfs_buf_ioerror_alert(bp, __this_address);

diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c

@@ -181,14 +181,11 @@ xfs_readsb(
 	/*
 	 * Allocate a (locked) buffer to hold the superblock. This will be kept
-	 * around at all times to optimize access to the superblock. Therefore,
-	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
-	 * elevated.
+	 * around at all times to optimize access to the superblock.
 	 */
 reread:
 	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
-			BTOBB(sector_size), XBF_NO_IOACCT, &bp,
-			buf_ops);
+			BTOBB(sector_size), 0, &bp, buf_ops);
 	if (error) {
 		if (loud)
 			xfs_warn(mp, "SB validate failed with error %d.", error);

diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c

@@ -1407,7 +1407,7 @@ xfs_rtmount_readsb(
 	/* m_blkbb_log is not set up yet */
 	error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
-			mp->m_sb.sb_blocksize >> BBSHIFT, XBF_NO_IOACCT, &bp,
+			mp->m_sb.sb_blocksize >> BBSHIFT, 0, &bp,
 			&xfs_rtsb_buf_ops);
 	if (error) {
 		xfs_warn(mp, "rt sb validate failed with error %d.", error);

diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h

@@ -593,6 +593,7 @@ DEFINE_EVENT(xfs_buf_flags_class, name, \
 DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
 DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
 DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_readahead);
 
 TRACE_EVENT(xfs_buf_ioerror,
 	TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),