btrfs: simplify range parameters of btrfs_wait_ordered_roots()
The range is specified in only two ways; we can simplify the whole-filesystem case to a NULL block group parameter.

Signed-off-by: David Sterba <dsterba@suse.com>
parent 839d6ea4f8
commit 42317ab440
13 changed files with 38 additions and 28 deletions
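At every call site the change is mechanical: the explicit (range_start, range_len) pair is replaced by a block group pointer, and NULL now stands for the whole filesystem. A minimal before/after sketch of the call patterns; bg is just an illustrative struct btrfs_block_group pointer, standing in for rc->block_group, cache or block_group in the hunks below:

	/* Before: explicit range, whole filesystem written as 0, (u64)-1. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, bg->start, bg->length);

	/* After: block group pointer, NULL means the whole filesystem. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, bg);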
@@ -684,7 +684,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 	if (ret)
 		btrfs_err(fs_info, "kobj add dev failed %d", ret);
 
-	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 
 	/*
 	 * Commit dev_replace state and reserve 1 item for it.
@@ -880,7 +880,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
 	}
-	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 
 	/*
 	 * We have to use this loop approach because at this point src_device
@@ -4520,7 +4520,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
 	 * extents that haven't had their dirty pages IO start writeout yet
 	 * actually get run and error out properly.
 	 */
-	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 }
 
 static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -1070,7 +1070,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
 	atomic_inc(&root->snapshot_force_cow);
 	snapshot_force_cow = true;
 
-	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
 
 	ret = btrfs_mksubvol(parent, idmap, name, namelen,
 			     root, readonly, inherit);
@@ -19,6 +19,7 @@
 #include "qgroup.h"
 #include "subpage.h"
 #include "file.h"
+#include "block-group.h"
 
 static struct kmem_cache *btrfs_ordered_extent_cache;
 
@@ -711,11 +712,11 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 }
 
 /*
- * wait for all the ordered extents in a root. This is done when balancing
- * space between drives.
+ * Wait for all the ordered extents in a root. Use @bg as range or do whole
+ * range if it's NULL.
  */
 u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
-			       const u64 range_start, const u64 range_len)
+			       const struct btrfs_block_group *bg)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	LIST_HEAD(splice);
@@ -723,7 +724,17 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 	LIST_HEAD(works);
 	struct btrfs_ordered_extent *ordered, *next;
 	u64 count = 0;
-	const u64 range_end = range_start + range_len;
+	u64 range_start, range_len;
+	u64 range_end;
+
+	if (bg) {
+		range_start = bg->start;
+		range_len = bg->length;
+	} else {
+		range_start = 0;
+		range_len = U64_MAX;
+	}
+	range_end = range_start + range_len;
 
 	mutex_lock(&root->ordered_extent_mutex);
 	spin_lock(&root->ordered_extent_lock);
@@ -770,8 +781,12 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 	return count;
 }
 
+/*
+ * Wait for @nr ordered extents that intersect the @bg, or the whole range of
+ * the filesystem if @bg is NULL.
+ */
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
-			      const u64 range_start, const u64 range_len)
+			      const struct btrfs_block_group *bg)
 {
 	struct btrfs_root *root;
 	LIST_HEAD(splice);
@@ -789,8 +804,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 			       &fs_info->ordered_roots);
 		spin_unlock(&fs_info->ordered_root_lock);
 
-		done = btrfs_wait_ordered_extents(root, nr,
-						  range_start, range_len);
+		done = btrfs_wait_ordered_extents(root, nr, bg);
 		btrfs_put_root(root);
 
 		spin_lock(&fs_info->ordered_root_lock);
@@ -193,9 +193,9 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
 					   struct list_head *list);
 u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
-			       const u64 range_start, const u64 range_len);
+			       const struct btrfs_block_group *bg);
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
-			      const u64 range_start, const u64 range_len);
+			      const struct btrfs_block_group *bg);
 void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
 					u64 end,
 					struct extent_state **cached_state);
@@ -1340,7 +1340,7 @@ static int flush_reservations(struct btrfs_fs_info *fs_info)
 	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
 	if (ret)
 		return ret;
-	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 	trans = btrfs_join_transaction(fs_info->tree_root);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
@@ -4208,7 +4208,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
 	ret = btrfs_start_delalloc_snapshot(root, true);
 	if (ret < 0)
 		goto out;
-	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
 
 	trans = btrfs_attach_transaction_barrier(root);
 	if (IS_ERR(trans)) {
@@ -4122,9 +4122,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
 
 		btrfs_wait_block_group_reservations(rc->block_group);
 		btrfs_wait_nocow_writers(rc->block_group);
-		btrfs_wait_ordered_roots(fs_info, U64_MAX,
-					 rc->block_group->start,
-					 rc->block_group->length);
+		btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
 
 		ret = btrfs_zone_finish(rc->block_group);
 		WARN_ON(ret && ret != -EAGAIN);
@@ -2448,7 +2448,7 @@ static int finish_extent_writes_for_zoned(struct btrfs_root *root,
 
 	btrfs_wait_block_group_reservations(cache);
 	btrfs_wait_nocow_writers(cache);
-	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
 
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
@@ -2684,8 +2684,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 */
 		if (sctx->is_dev_replace) {
 			btrfs_wait_nocow_writers(cache);
-			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
-						 cache->length);
+			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
 		}
 
 		scrub_pause_off(fs_info);
@@ -8046,7 +8046,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
 		ret = btrfs_start_delalloc_snapshot(root, false);
 		if (ret)
 			return ret;
-		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+		btrfs_wait_ordered_extents(root, U64_MAX, NULL);
 	}
 
 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -8054,7 +8054,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
 		ret = btrfs_start_delalloc_snapshot(root, false);
 		if (ret)
 			return ret;
-		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+		btrfs_wait_ordered_extents(root, U64_MAX, NULL);
 	}
 
 	return 0;
@@ -704,7 +704,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 skip_async:
 		loops++;
 		if (wait_ordered && !trans) {
-			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, items, NULL);
 		} else {
 			time_left = schedule_timeout_killable(1);
 			if (time_left)
@@ -983,7 +983,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 		return 0;
 	}
 
-	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 
 	trans = btrfs_attach_transaction_barrier(root);
 	if (IS_ERR(trans)) {
@@ -2110,7 +2110,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
-		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+		btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 }
 
 /*
@@ -2212,8 +2212,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
 		/* Ensure all writes in this block group finish */
 		btrfs_wait_block_group_reservations(block_group);
 		/* No need to wait for NOCOW writers. Zoned mode does not allow that */
-		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
-					 block_group->length);
+		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
 		/* Wait for extent buffers to be written. */
 		if (is_metadata)
 			wait_eb_writebacks(block_group);