xfs bug fixes 6.14-rc2
Signed-off-by: Carlos Maiolino <cem@kernel.org>

-----BEGIN PGP SIGNATURE-----
iJUEABMJAB0WIQSmtYVZ/MfVMGUq1GNcsMJ8RxYuYwUCZ6C1iQAKCRBcsMJ8RxYu
Y1k+AX9qlNEvvzfcJRP7wcqjX64B0IIpuBNd86flWIYcRUug9d/vIExKvRF2A+t7
x1kc5bsBgONUBNY+xDTMAXU58MnONDIWoy9oeP/WXGaXWelN5TKlVDhHU30gXNT1
CoDNOQ/B1w==
=pduU
-----END PGP SIGNATURE-----

Merge tag 'xfs-fixes-6.14-rc2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs bug fixes from Carlos Maiolino:
 "A few fixes for XFS, but the most notable one is:

   - xfs: remove xfs_buf_cache.bc_lock which has been hit by different
     persons including syzbot"

* tag 'xfs-fixes-6.14-rc2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: remove xfs_buf_cache.bc_lock
  xfs: Add error handling for xfs_reflink_cancel_cow_range
  xfs: Propagate errors from xfs_reflink_cancel_cow_range in xfs_dax_write_iomap_end
  xfs: don't call remap_verify_area with sb write protection held
  xfs: remove an out of data comment in _xfs_buf_alloc
  xfs: fix the entry condition of exact EOF block allocation optimization
commit 0a08238acf
6 changed files with 58 additions and 76 deletions
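
A note on the most notable fix: after the bc_lock removal, a lookup that races with the final release of a buffer is resolved on the buffer itself (xfs_buf_try_hold() on the per-buffer state, with the rhashtable insert done under rcu_read_lock()), not under a cache-wide spinlock. The stand-alone sketch below only illustrates that general "take a reference only if the object is still live" idea using plain C11 atomics; the names obj_try_hold/obj_put are invented for the example and are not the XFS helpers, which protect b_hold with the buffer's b_lock instead.

/*
 * Illustrative sketch only -- not XFS code.  A hold count whose
 * "try to take a reference" operation fails once the count has hit
 * zero lets lookups race against the final release without a global
 * lock: the race is decided per object.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_int hold;        /* 0 means the object is being torn down */
};

/* Take a reference only while at least one other reference exists. */
static bool obj_try_hold(struct obj *o)
{
        int old = atomic_load_explicit(&o->hold, memory_order_relaxed);

        while (old != 0) {
                if (atomic_compare_exchange_weak_explicit(&o->hold, &old,
                                old + 1, memory_order_acquire,
                                memory_order_relaxed))
                        return true;    /* reference taken */
        }
        return false;                   /* lost the race with the last put */
}

/* Drop a reference; returns true when the caller dropped the last one. */
static bool obj_put(struct obj *o)
{
        return atomic_fetch_sub_explicit(&o->hold, 1,
                        memory_order_release) == 1;
}

int main(void)
{
        struct obj o = { .hold = 1 };

        printf("try_hold while live: %d\n", obj_try_hold(&o));     /* 1 */
        obj_put(&o);
        if (obj_put(&o))                        /* drops the last reference */
                printf("last put, object would be freed here\n");
        printf("try_hold after teardown: %d\n", obj_try_hold(&o)); /* 0 */
        return 0;
}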
fs/xfs/libxfs/xfs_bmap.c

@@ -3563,12 +3563,12 @@ xfs_bmap_btalloc_at_eof(
         int                     error;

         /*
-         * If there are already extents in the file, try an exact EOF block
-         * allocation to extend the file as a contiguous extent. If that fails,
-         * or it's the first allocation in a file, just try for a stripe aligned
-         * allocation.
+         * If there are already extents in the file, and xfs_bmap_adjacent() has
+         * given a better blkno, try an exact EOF block allocation to extend the
+         * file as a contiguous extent. If that fails, or it's the first
+         * allocation in a file, just try for a stripe aligned allocation.
         */
-        if (ap->offset) {
+        if (ap->eof) {
                xfs_extlen_t    nextminlen = 0;

                /*
@@ -3736,7 +3736,8 @@ xfs_bmap_btalloc_best_length(
         int                     error;

         ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
-        xfs_bmap_adjacent(ap);
+        if (!xfs_bmap_adjacent(ap))
+                ap->eof = false;

         /*
         * Search for an allocation group with a single extent large enough for
fs/xfs/xfs_buf.c

@@ -41,8 +41,7 @@ struct kmem_cache *xfs_buf_cache;
  *
  * xfs_buf_rele:
  *      b_lock
- *        pag_buf_lock
- *          lru_lock
+ *        lru_lock
  *
  * xfs_buftarg_drain_rele
  *      lru_lock
@@ -220,23 +219,25 @@ _xfs_buf_alloc(
         */
        flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

-       spin_lock_init(&bp->b_lock);
+       /*
+        * A new buffer is held and locked by the owner.  This ensures that the
+        * buffer is owned by the caller and racing RCU lookups right after
+        * inserting into the hash table are safe (and will have to wait for
+        * the unlock to do anything non-trivial).
+        */
        bp->b_hold = 1;
+       sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+       spin_lock_init(&bp->b_lock);
        atomic_set(&bp->b_lru_ref, 1);
        init_completion(&bp->b_iowait);
        INIT_LIST_HEAD(&bp->b_lru);
        INIT_LIST_HEAD(&bp->b_list);
        INIT_LIST_HEAD(&bp->b_li_list);
-       sema_init(&bp->b_sema, 0); /* held, no waiters */
        bp->b_target = target;
        bp->b_mount = target->bt_mount;
        bp->b_flags = flags;

-       /*
-        * Set length and io_length to the same value initially.
-        * I/O routines should use io_length, which will be the same in
-        * most cases but may be reset (e.g. XFS recovery).
-        */
        error = xfs_buf_get_maps(bp, nmaps);
        if (error) {
                kmem_cache_free(xfs_buf_cache, bp);
@@ -502,7 +503,6 @@ int
 xfs_buf_cache_init(
        struct xfs_buf_cache    *bch)
 {
-       spin_lock_init(&bch->bc_lock);
        return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
 }

@@ -652,17 +652,20 @@ xfs_buf_find_insert(
        if (error)
                goto out_free_buf;

-       spin_lock(&bch->bc_lock);
+       /* The new buffer keeps the perag reference until it is freed. */
+       new_bp->b_pag = pag;
+
+       rcu_read_lock();
        bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
                        &new_bp->b_rhash_head, xfs_buf_hash_params);
        if (IS_ERR(bp)) {
+               rcu_read_unlock();
                error = PTR_ERR(bp);
-               spin_unlock(&bch->bc_lock);
                goto out_free_buf;
        }
        if (bp && xfs_buf_try_hold(bp)) {
                /* found an existing buffer */
-               spin_unlock(&bch->bc_lock);
+               rcu_read_unlock();
                error = xfs_buf_find_lock(bp, flags);
                if (error)
                        xfs_buf_rele(bp);
@@ -670,10 +673,8 @@ xfs_buf_find_insert(
                        *bpp = bp;
                goto out_free_buf;
        }
+       rcu_read_unlock();

-       /* The new buffer keeps the perag reference until it is freed. */
-       new_bp->b_pag = pag;
-       spin_unlock(&bch->bc_lock);
        *bpp = new_bp;
        return 0;

@@ -1090,7 +1091,6 @@ xfs_buf_rele_cached(
        }

        /* we are asked to drop the last reference */
-       spin_lock(&bch->bc_lock);
        __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
@@ -1102,7 +1102,6 @@ xfs_buf_rele_cached(
                        bp->b_state &= ~XFS_BSTATE_DISPOSE;
                else
                        bp->b_hold--;
-               spin_unlock(&bch->bc_lock);
        } else {
                bp->b_hold--;
                /*
@@ -1120,7 +1119,6 @@ xfs_buf_rele_cached(
                ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
                rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
                                xfs_buf_hash_params);
-               spin_unlock(&bch->bc_lock);
                if (pag)
                        xfs_perag_put(pag);
                freebuf = true;
fs/xfs/xfs_buf.h

@@ -80,7 +80,6 @@ typedef unsigned int xfs_buf_flags_t;
 #define XFS_BSTATE_IN_FLIGHT     (1 << 1)       /* I/O in flight */

 struct xfs_buf_cache {
-       spinlock_t              bc_lock;
        struct rhashtable       bc_hash;
 };

fs/xfs/xfs_exchrange.c

@@ -329,22 +329,6 @@ out_trans_cancel:
  * successfully but before locks are dropped.
  */

-/* Verify that we have security clearance to perform this operation. */
-static int
-xfs_exchange_range_verify_area(
-       struct xfs_exchrange    *fxr)
-{
-       int                     ret;
-
-       ret = remap_verify_area(fxr->file1, fxr->file1_offset, fxr->length,
-                       true);
-       if (ret)
-               return ret;
-
-       return remap_verify_area(fxr->file2, fxr->file2_offset, fxr->length,
-                       true);
-}
-
 /*
  * Performs necessary checks before doing a range exchange, having stabilized
  * mutable inode attributes via i_rwsem.
@@ -355,11 +339,13 @@ xfs_exchange_range_checks(
        unsigned int            alloc_unit)
 {
        struct inode            *inode1 = file_inode(fxr->file1);
+       loff_t                  size1 = i_size_read(inode1);
        struct inode            *inode2 = file_inode(fxr->file2);
+       loff_t                  size2 = i_size_read(inode2);
        uint64_t                allocmask = alloc_unit - 1;
        int64_t                 test_len;
        uint64_t                blen;
-       loff_t                  size1, size2, tmp;
+       loff_t                  tmp;
        int                     error;

        /* Don't touch certain kinds of inodes */
@@ -368,24 +354,25 @@ xfs_exchange_range_checks(
        if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2))
                return -ETXTBSY;

-       size1 = i_size_read(inode1);
-       size2 = i_size_read(inode2);
-
        /* Ranges cannot start after EOF. */
        if (fxr->file1_offset > size1 || fxr->file2_offset > size2)
                return -EINVAL;

-       /*
-        * If the caller said to exchange to EOF, we set the length of the
-        * request large enough to cover everything to the end of both files.
-        */
        if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) {
+               /*
+                * If the caller said to exchange to EOF, we set the length of
+                * the request large enough to cover everything to the end of
+                * both files.
+                */
                fxr->length = max_t(int64_t, size1 - fxr->file1_offset,
                                    size2 - fxr->file2_offset);
-
-               error = xfs_exchange_range_verify_area(fxr);
-               if (error)
-                       return error;
+       } else {
+               /*
+                * Otherwise we require both ranges to end within EOF.
+                */
+               if (fxr->file1_offset + fxr->length > size1 ||
+                   fxr->file2_offset + fxr->length > size2)
+                       return -EINVAL;
        }

        /*
@@ -401,15 +388,6 @@ xfs_exchange_range_checks(
            check_add_overflow(fxr->file2_offset, fxr->length, &tmp))
                return -EINVAL;

-       /*
-        * We require both ranges to end within EOF, unless we're exchanging
-        * to EOF.
-        */
-       if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) &&
-           (fxr->file1_offset + fxr->length > size1 ||
-            fxr->file2_offset + fxr->length > size2))
-               return -EINVAL;
-
        /*
        * Make sure we don't hit any file size limits.  If we hit any size
        * limits such that test_length was adjusted, we abort the whole
@@ -747,6 +725,7 @@ xfs_exchange_range(
 {
        struct inode            *inode1 = file_inode(fxr->file1);
        struct inode            *inode2 = file_inode(fxr->file2);
+       loff_t                  check_len = fxr->length;
        int                     ret;

        BUILD_BUG_ON(XFS_EXCHANGE_RANGE_ALL_FLAGS &
@@ -779,14 +758,18 @@ xfs_exchange_range(
                return -EBADF;

        /*
-        * If we're not exchanging to EOF, we can check the areas before
-        * stabilizing both files' i_size.
+        * If we're exchanging to EOF we can't calculate the length until taking
+        * the iolock.  Pass a 0 length to remap_verify_area similar to the
+        * FICLONE and FICLONERANGE ioctls that support cloning to EOF as well.
         */
-       if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)) {
-               ret = xfs_exchange_range_verify_area(fxr);
-               if (ret)
-                       return ret;
-       }
+       if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)
+               check_len = 0;
+       ret = remap_verify_area(fxr->file1, fxr->file1_offset, check_len, true);
+       if (ret)
+               return ret;
+       ret = remap_verify_area(fxr->file2, fxr->file2_offset, check_len, true);
+       if (ret)
+               return ret;

        /* Update cmtime if the fd/inode don't forbid it. */
        if (!(fxr->file1->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode1))
fs/xfs/xfs_inode.c

@@ -1404,8 +1404,11 @@ xfs_inactive(
                goto out;

        /* Try to clean out the cow blocks if there are any. */
-       if (xfs_inode_has_cow_data(ip))
-               xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
+       if (xfs_inode_has_cow_data(ip)) {
+               error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
+               if (error)
+                       goto out;
+       }

        if (VFS_I(ip)->i_nlink != 0) {
                /*
fs/xfs/xfs_iomap.c

@@ -976,10 +976,8 @@ xfs_dax_write_iomap_end(
        if (!xfs_is_cow_inode(ip))
                return 0;

-       if (!written) {
-               xfs_reflink_cancel_cow_range(ip, pos, length, true);
-               return 0;
-       }
+       if (!written)
+               return xfs_reflink_cancel_cow_range(ip, pos, length, true);

        return xfs_reflink_end_cow(ip, pos, written);
 }