for-6.14-rc2-tag

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmeuSwwACgkQxWXV+ddt
WDtQ8Q//fsTAu1DLjeVrzMhVNswGwr3PgWzLk3PqWTDEG9UeR/jJYntIPglVyhhP
Mp2E3CYe2rlWwK0K3PITDu179tLrnCvbfKEPwWvyVZw5D0EDjPQYs9/H5ztSE8O4
4i3kv2LjlXHE3h62tjNoeHL4NK1SRJcFeH69XhhIe0ELvTQVarvfJupZwdQQivWg
sDlQXklXxl1kEtHVGnmz6jd09a0vti7xw8MAG6QiIP83Hvt6Ie+NLfTfTCkRIWSK
95mPM+1YhmLQe15sD8xjHyYmH5E0cEXQh1Pvlz6xqQWRvZERG8Pmj+iwFTLaw4iA
JR6sN2/KFgXE9OIGbFqQ+dvm++2hWcnPwW+h6EdOSj0DQkupbJm4VeBK0WQ4YZ+x
Q0OQXPTfGpcjp7KyJrT6EZFq5VxeEfOz4hozhiCSTs+Xpx7Oh/2THL01N/dUMn0C
SNR9E4/Rlq7rWV7euGwicwo/tZZIdCr4ihUGk4jpamlUbIXj+2SrOc4cpQdypmsO
DeYvwzIXnPe8/Eo3rZ5ej0DK7GxfEFyd6v6l0oS6HepvMJ6y6/eiOYteVbGpvhXv
J2M6PLstiZc152VHPApN9+ZlXBeGjyMfxLcsweblpSBBt/57otY6cMhqNuIp0j9B
0zP3KKOwrIJ8tzcwjMSH+2OZsDQ7oc7eiJI08r0IcpCbCBTIeyE=
=0hI+
-----END PGP SIGNATURE-----

Merge tag 'for-6.14-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - fix stale page cache after a race between readahead and direct IO write

 - fix hole expansion when writing at an offset beyond EOF, previously the
   range between EOF and the write offset was not zeroed

 - use the proper way to calculate offsets in folio ranges

* tag 'for-6.14-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix hole expansion when writing at an offset beyond EOF
  btrfs: fix stale page cache after race between readahead and direct IO write
  btrfs: fix two misuses of folio_shift()
commit 945ce413ac
2 changed files with 21 additions and 12 deletions
fs/btrfs/extent_io.c

@@ -523,8 +523,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
                 u64 end;
                 u32 len;
 
-                /* For now only order 0 folios are supported for data. */
-                ASSERT(folio_order(folio) == 0);
                 btrfs_debug(fs_info,
                         "%s: bi_sector=%llu, err=%d, mirror=%u",
                         __func__, bio->bi_iter.bi_sector, bio->bi_status,
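Note on the two deleted lines above: they drop the old assumption that data folios are always order 0. For reference, an order-N folio spans 2^N pages, which is what folio_size() and folio_shift() express; below is a minimal userspace model of that relation (the PAGE_SHIFT value is an assumption for illustration, this is not kernel code):

#include <stdio.h>

/* Assumed page size for illustration; 4096 on x86-64. */
#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Userspace models of folio_size()/folio_shift() for an order-N folio. */
static unsigned long model_folio_size(unsigned int order)  { return PAGE_SIZE << order; }
static unsigned long model_folio_shift(unsigned int order) { return PAGE_SHIFT + order; }

int main(void)
{
        for (unsigned int order = 0; order <= 3; order++)
                printf("order %u: %lu bytes, shift %lu\n", order,
                       model_folio_size(order), model_folio_shift(order));
        return 0;
}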
@@ -552,7 +550,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
 
                 if (likely(uptodate)) {
                         loff_t i_size = i_size_read(inode);
-                        pgoff_t end_index = i_size >> folio_shift(folio);
 
                         /*
                          * Zero out the remaining part if this range straddles
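The end_index deleted above was the first folio_shift() misuse: folio->index is always in PAGE_SIZE units, while i_size >> folio_shift(folio) yields an index in folio-size units, so for any folio larger than one page the comparison in the next hunk could never match the folio containing EOF. A worked userspace model with assumed numbers:

#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
        /* Assumed example: an order-2 folio (4 pages) covering page indexes
         * 4..7, with EOF falling inside it at page 5. */
        unsigned int order = 2;
        unsigned long folio_index = 4;              /* always PAGE_SIZE units */
        unsigned long i_size = 5UL << PAGE_SHIFT;   /* 20480 */

        /* Buggy: shifts by the folio size, yielding an index in 16K units. */
        unsigned long end_index = i_size >> (PAGE_SHIFT + order);   /* = 1 */
        /* Fixed: what folio_contains() checks, in PAGE_SIZE units. */
        unsigned long eof_page = i_size >> PAGE_SHIFT;              /* = 5 */
        int contains = eof_page >= folio_index &&
                       eof_page < folio_index + (1UL << order);

        printf("buggy check: %d, fixed check: %d\n",
               folio_index == end_index, contains);      /* prints 0, 1 */
        return 0;
}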
@@ -561,9 +558,11 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
                          * Here we should only zero the range inside the folio,
                          * not touch anything else.
                          *
-                         * NOTE: i_size is exclusive while end is inclusive.
+                         * NOTE: i_size is exclusive while end is inclusive and
+                         * folio_contains() takes PAGE_SIZE units.
                          */
-                        if (folio_index(folio) == end_index && i_size <= end) {
+                        if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
+                            i_size <= end) {
                                 u32 zero_start = max(offset_in_folio(folio, i_size),
                                                      offset_in_folio(folio, start));
                                 u32 zero_len = offset_in_folio(folio, end) + 1 -
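The zero_start/zero_len arithmetic above mixes an exclusive bound (i_size) with an inclusive one (end), which is where off-by-one errors creep in; a small userspace check of the math with assumed values (offset_in_folio() modeled as a plain subtraction):

#include <stdio.h>

int main(void)
{
        /* Assumed example: a 16K folio at file offset 0, the read covered
         * [start, end] = [0, 16383], and i_size (exclusive) is 10000. */
        unsigned long folio_pos = 0;
        unsigned long start = 0, end = 16383, i_size = 10000;

        unsigned long off_isize = i_size - folio_pos;
        unsigned long off_start = start - folio_pos;
        unsigned long zero_start = off_isize > off_start ? off_isize : off_start;
        /* end is inclusive, hence the +1 to turn it into an exclusive bound;
         * i_size is already exclusive and needs no adjustment. */
        unsigned long zero_len = (end - folio_pos) + 1 - zero_start;

        printf("zero %lu bytes at offset %lu\n", zero_len, zero_start);
        return 0;                                  /* 6384 bytes at 10000 */
}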
@@ -899,7 +898,6 @@ static struct extent_map *get_extent_map(struct btrfs_inode *inode,
                                          u64 len, struct extent_map **em_cached)
 {
         struct extent_map *em;
-        struct extent_state *cached_state = NULL;
 
         ASSERT(em_cached);
 
@@ -915,14 +913,12 @@ static struct extent_map *get_extent_map(struct btrfs_inode *inode,
                 *em_cached = NULL;
         }
 
-        btrfs_lock_and_flush_ordered_range(inode, start, start + len - 1, &cached_state);
         em = btrfs_get_extent(inode, folio, start, len);
         if (!IS_ERR(em)) {
                 BUG_ON(*em_cached);
                 refcount_inc(&em->refs);
                 *em_cached = em;
         }
-        unlock_extent(&inode->io_tree, start, start + len - 1, &cached_state);
 
         return em;
 }
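With the per-call locking removed above, get_extent_map() now relies on its callers holding the extent range locked for the whole operation; the btrfs_read_folio() and btrfs_readahead() hunks below add exactly that. Sketched as a generic lock-hoisting pattern in plain pthreads C (hypothetical cache type, not the btrfs API):

#include <pthread.h>

/* Hypothetical cache guarded by one lock. */
struct cache {
        pthread_mutex_t lock;
        /* ... entries ... */
};

/* Callee no longer locks: the caller must already hold c->lock. */
static void lookup_locked(struct cache *c, unsigned long key)
{
        (void)c;
        (void)key;
        /* ... find the entry for key; c->lock held by the caller ... */
}

static void batch_lookup(struct cache *c, unsigned long first, unsigned long last)
{
        pthread_mutex_lock(&c->lock);       /* one lock for the whole range */
        for (unsigned long key = first; key <= last; key++)
                lookup_locked(c, key);
        pthread_mutex_unlock(&c->lock);
}

Holding the lock across the whole batch is what closes the window in which a concurrent writer could invalidate state between two per-item lookups.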
@@ -956,7 +952,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
                 return ret;
         }
 
-        if (folio->index == last_byte >> folio_shift(folio)) {
+        if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
                 size_t zero_offset = offset_in_folio(folio, last_byte);
 
                 if (zero_offset) {
@@ -1079,11 +1075,18 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 
 int btrfs_read_folio(struct file *file, struct folio *folio)
 {
+        struct btrfs_inode *inode = folio_to_inode(folio);
+        const u64 start = folio_pos(folio);
+        const u64 end = start + folio_size(folio) - 1;
+        struct extent_state *cached_state = NULL;
         struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
         struct extent_map *em_cached = NULL;
         int ret;
 
+        btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
         ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
+        unlock_extent(&inode->io_tree, start, end, &cached_state);
+
         free_extent_map(em_cached);
 
         /*
@@ -2380,12 +2383,20 @@ void btrfs_readahead(struct readahead_control *rac)
 {
         struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
         struct folio *folio;
+        struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+        const u64 start = readahead_pos(rac);
+        const u64 end = start + readahead_length(rac) - 1;
+        struct extent_state *cached_state = NULL;
         struct extent_map *em_cached = NULL;
         u64 prev_em_start = (u64)-1;
 
+        btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
+
         while ((folio = readahead_folio(rac)) != NULL)
                 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
 
+        unlock_extent(&inode->io_tree, start, end, &cached_state);
+
         if (em_cached)
                 free_extent_map(em_cached);
         submit_one_bio(&bio_ctrl);
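For context, the race closed by the locking above: readahead populates the page cache while a direct IO write to the same range invalidates it, and the readahead could re-insert pre-write data. A hedged userspace reproducer sketch (the path, sizes, and single iteration are assumptions; a real reproducer would loop many times, and this needs -lpthread):

#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

static const char *path = "/mnt/btrfs/testfile";  /* assumed btrfs mount */

static void *ra_thread(void *arg)
{
        int fd = open(path, O_RDONLY);

        (void)arg;
        if (fd >= 0) {
                readahead(fd, 0, 1 << 20);   /* populate the page cache */
                close(fd);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;
        void *buf;
        int fd = open(path, O_WRONLY | O_DIRECT);

        if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
                return 1;
        pthread_create(&t, NULL, ra_thread, NULL);
        pwrite(fd, buf, 4096, 0);            /* direct write racing readahead */
        pthread_join(t, NULL);
        close(fd);
        /* Before the fix, a later buffered read could return the pre-write
         * (stale) page cache contents. */
        return 0;
}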
fs/btrfs/file.c

@@ -1039,7 +1039,6 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
         loff_t pos = iocb->ki_pos;
         int ret;
         loff_t oldsize;
-        loff_t start_pos;
 
         /*
          * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
@@ -1066,9 +1065,8 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
                 inode_inc_iversion(inode);
         }
 
-        start_pos = round_down(pos, fs_info->sectorsize);
         oldsize = i_size_read(inode);
-        if (start_pos > oldsize) {
+        if (pos > oldsize) {
                 /* Expand hole size to cover write data, preventing empty gap */
                 loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
 
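The round_down() removed above is the core of the hole-expansion bug: for a write starting beyond EOF but within the same sector as EOF, start_pos <= oldsize, so the hole expansion (btrfs_cont_expand()) was skipped and the range [i_size, pos) was never zeroed. A hedged userspace check of the scenario (the path and the 4K sector size are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/mnt/btrfs/foo";   /* assumed btrfs mount */
        char data[1024], gap[1024];
        int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);

        if (fd < 0)
                return 1;
        memset(data, 0xab, sizeof(data));
        pwrite(fd, data, sizeof(data), 0);     /* i_size = 1024, not 4K aligned */
        pwrite(fd, data, sizeof(data), 3000);  /* beyond EOF, same 4K sector */

        /* The gap [1024, 3000) must read back as zeroes; before the fix it
         * could expose stale data. */
        pread(fd, gap, sizeof(gap), 1024);
        for (size_t i = 0; i < sizeof(gap); i++) {
                if (gap[i] != 0) {
                        printf("non-zero byte at offset %zu\n", 1024 + i);
                        break;
                }
        }
        close(fd);
        return 0;
}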