buffer: return bool from grow_dev_folio()
Patch series "More buffer_head cleanups", v2. The first patch is a left-over from last cycle. The rest fix "obvious" block size > PAGE_SIZE problems. I haven't tested with a large block size setup (but I have done an ext4 xfstests run). This patch (of 7): Rename grow_dev_page() to grow_dev_folio() and make it return a bool. Document what that bool means; it's more subtle than it first appears. Also rename the 'failed' label to 'unlock' because it's not exactly 'failed'. It just hasn't succeeded. Link: https://lkml.kernel.org/r/20231109210608.2252323-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Hannes Reinecke <hare@suse.de> Cc: Luis Chamberlain <mcgrof@kernel.org> Cc: Pankaj Raghav <p.raghav@samsung.com> Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
ffda655682
commit
6d840a1877
1 changed file with 25 additions and 25 deletions
50
fs/buffer.c
50
fs/buffer.c
|
@ -1024,40 +1024,43 @@ static sector_t folio_init_buffers(struct folio *folio,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Create the page-cache page that contains the requested block.
|
* Create the page-cache folio that contains the requested block.
|
||||||
*
|
*
|
||||||
* This is used purely for blockdev mappings.
|
* This is used purely for blockdev mappings.
|
||||||
|
*
|
||||||
|
* Returns false if we have a 'permanent' failure. Returns true if
|
||||||
|
* we succeeded, or the caller should retry.
|
||||||
*/
|
*/
|
||||||
static int
|
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
|
||||||
grow_dev_page(struct block_device *bdev, sector_t block,
|
pgoff_t index, unsigned size, int sizebits, gfp_t gfp)
|
||||||
pgoff_t index, int size, int sizebits, gfp_t gfp)
|
|
||||||
{
|
{
|
||||||
struct inode *inode = bdev->bd_inode;
|
struct inode *inode = bdev->bd_inode;
|
||||||
struct folio *folio;
|
struct folio *folio;
|
||||||
struct buffer_head *bh;
|
struct buffer_head *bh;
|
||||||
sector_t end_block;
|
sector_t end_block = 0;
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
folio = __filemap_get_folio(inode->i_mapping, index,
|
folio = __filemap_get_folio(inode->i_mapping, index,
|
||||||
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
|
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
|
||||||
if (IS_ERR(folio))
|
if (IS_ERR(folio))
|
||||||
return PTR_ERR(folio);
|
return false;
|
||||||
|
|
||||||
bh = folio_buffers(folio);
|
bh = folio_buffers(folio);
|
||||||
if (bh) {
|
if (bh) {
|
||||||
if (bh->b_size == size) {
|
if (bh->b_size == size) {
|
||||||
end_block = folio_init_buffers(folio, bdev,
|
end_block = folio_init_buffers(folio, bdev,
|
||||||
(sector_t)index << sizebits, size);
|
(sector_t)index << sizebits, size);
|
||||||
goto done;
|
goto unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Caller should retry if this call fails */
|
||||||
|
end_block = ~0ULL;
|
||||||
if (!try_to_free_buffers(folio))
|
if (!try_to_free_buffers(folio))
|
||||||
goto failed;
|
goto unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = -ENOMEM;
|
|
||||||
bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
|
bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
|
||||||
if (!bh)
|
if (!bh)
|
||||||
goto failed;
|
goto unlock;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Link the folio to the buffers and initialise them. Take the
|
* Link the folio to the buffers and initialise them. Take the
|
||||||
|
@ -1069,20 +1072,19 @@ grow_dev_page(struct block_device *bdev, sector_t block,
|
||||||
end_block = folio_init_buffers(folio, bdev,
|
end_block = folio_init_buffers(folio, bdev,
|
||||||
(sector_t)index << sizebits, size);
|
(sector_t)index << sizebits, size);
|
||||||
spin_unlock(&inode->i_mapping->private_lock);
|
spin_unlock(&inode->i_mapping->private_lock);
|
||||||
done:
|
unlock:
|
||||||
ret = (block < end_block) ? 1 : -ENXIO;
|
|
||||||
failed:
|
|
||||||
folio_unlock(folio);
|
folio_unlock(folio);
|
||||||
folio_put(folio);
|
folio_put(folio);
|
||||||
return ret;
|
return block < end_block;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Create buffers for the specified block device block's page. If
|
* Create buffers for the specified block device block's folio. If
|
||||||
* that page was dirty, the buffers are set dirty also.
|
* that folio was dirty, the buffers are set dirty also. Returns false
|
||||||
|
* if we've hit a permanent error.
|
||||||
*/
|
*/
|
||||||
static int
|
static bool grow_buffers(struct block_device *bdev, sector_t block,
|
||||||
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
|
unsigned size, gfp_t gfp)
|
||||||
{
|
{
|
||||||
pgoff_t index;
|
pgoff_t index;
|
||||||
int sizebits;
|
int sizebits;
|
||||||
|
@ -1099,11 +1101,11 @@ grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
|
||||||
"device %pg\n",
|
"device %pg\n",
|
||||||
__func__, (unsigned long long)block,
|
__func__, (unsigned long long)block,
|
||||||
bdev);
|
bdev);
|
||||||
return -EIO;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Create a page with the proper size buffers.. */
|
/* Create a folio with the proper size buffers */
|
||||||
return grow_dev_page(bdev, block, index, size, sizebits, gfp);
|
return grow_dev_folio(bdev, block, index, size, sizebits, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct buffer_head *
|
static struct buffer_head *
|
||||||
|
@ -1124,14 +1126,12 @@ __getblk_slow(struct block_device *bdev, sector_t block,
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
struct buffer_head *bh;
|
struct buffer_head *bh;
|
||||||
int ret;
|
|
||||||
|
|
||||||
bh = __find_get_block(bdev, block, size);
|
bh = __find_get_block(bdev, block, size);
|
||||||
if (bh)
|
if (bh)
|
||||||
return bh;
|
return bh;
|
||||||
|
|
||||||
ret = grow_buffers(bdev, block, size, gfp);
|
if (!grow_buffers(bdev, block, size, gfp))
|
||||||
if (ret < 0)
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue