mm: return an ERR_PTR from __filemap_get_folio
Instead of returning NULL for all errors, distinguish between:

 - no entry found and not asked to allocate (-ENOENT)
 - failed to allocate memory (-ENOMEM)
 - would block (-EAGAIN)

so that callers don't have to guess the error based on the passed-in
flags.  Also pass the error through the direct callers: filemap_get_folio,
filemap_lock_folio, filemap_grab_folio and filemap_get_incore_folio.

[hch@lst.de: fix null-pointer deref]
Link: https://lkml.kernel.org/r/20230310070023.GA13563@lst.de
Link: https://lkml.kernel.org/r/20230310043137.GA1624890@u2004
Link: https://lkml.kernel.org/r/20230307143410.28031-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> [nilfs2]
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
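The caller-side conversion this implies is mechanical: a NULL check
becomes an IS_ERR() check, and the errno comes from the returned pointer
instead of being hard-coded.  A minimal sketch of the pattern (the ret
variable and out label are illustrative, not taken from any particular
caller in this patch):

	/* Before: NULL covered every failure, so callers guessed the errno. */
	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT, gfp);
	if (!folio) {
		ret = -ENOMEM;	/* a guess: -EAGAIN was also possible */
		goto out;
	}

	/* After: the failure reason is encoded in the pointer itself. */
	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT, gfp);
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);	/* -ENOENT, -ENOMEM or -EAGAIN */
		goto out;
	}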
parent 48c9d11375
commit 66dabbb65d

21 changed files with 67 additions and 65 deletions
fs/afs/dir.c (10 lines changed)

@@ -319,16 +319,16 @@ expand:
 		struct folio *folio;
 
 		folio = filemap_get_folio(mapping, i);
-		if (!folio) {
+		if (IS_ERR(folio)) {
 			if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
 				afs_stat_v(dvnode, n_inval);
 
-			ret = -ENOMEM;
 			folio = __filemap_get_folio(mapping,
 						    i, FGP_LOCK | FGP_CREAT,
 						    mapping->gfp_mask);
-			if (!folio)
+			if (IS_ERR(folio)) {
+				ret = PTR_ERR(folio);
 				goto error;
+			}
 			folio_attach_private(folio, (void *)1);
 			folio_unlock(folio);
 		}

@@ -524,7 +524,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
 	 */
 	folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
 				    FGP_ACCESSED, 0);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
 		break;
 	}
fs/afs/dir_edit.c

@@ -115,7 +115,7 @@ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
 	folio = __filemap_get_folio(mapping, index,
 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 				    mapping->gfp_mask);
-	if (!folio)
+	if (IS_ERR(folio))
 		clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
 	else if (folio && !folio_test_private(folio))
 		folio_attach_private(folio, (void *)1);
fs/afs/write.c

@@ -232,7 +232,7 @@ static void afs_kill_pages(struct address_space *mapping,
 		_debug("kill %lx (to %lx)", index, last);
 
 		folio = filemap_get_folio(mapping, index);
-		if (!folio) {
+		if (IS_ERR(folio)) {
 			next = index + 1;
 			continue;
 		}

@@ -270,7 +270,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
 		_debug("redirty %llx @%llx", len, start);
 
 		folio = filemap_get_folio(mapping, index);
-		if (!folio) {
+		if (IS_ERR(folio)) {
 			next = index + 1;
 			continue;
 		}
fs/ext4/inode.c

@@ -5395,7 +5395,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
 	while (1) {
 		struct folio *folio = filemap_lock_folio(inode->i_mapping,
 					      inode->i_size >> PAGE_SHIFT);
-		if (!folio)
+		if (IS_ERR(folio))
 			return;
 		ret = __ext4_journalled_invalidate_folio(folio, offset,
 						folio_size(folio) - offset);
fs/ext4/move_extent.c

@@ -141,18 +141,18 @@ mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
 	flags = memalloc_nofs_save();
 	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
 			mapping_gfp_mask(mapping[0]));
-	if (!folio[0]) {
+	if (IS_ERR(folio[0])) {
 		memalloc_nofs_restore(flags);
-		return -ENOMEM;
+		return PTR_ERR(folio[0]);
 	}
 
 	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
 			mapping_gfp_mask(mapping[1]));
 	memalloc_nofs_restore(flags);
-	if (!folio[1]) {
+	if (IS_ERR(folio[1])) {
 		folio_unlock(folio[0]);
 		folio_put(folio[0]);
-		return -ENOMEM;
+		return PTR_ERR(folio[1]);
 	}
 	/*
 	 * __filemap_get_folio() may not wait on folio's writeback if
fs/hugetlbfs/inode.c

@@ -697,7 +697,7 @@ static void hugetlbfs_zero_partial_page(struct hstate *h,
 	struct folio *folio;
 
 	folio = filemap_lock_folio(mapping, idx);
-	if (!folio)
+	if (IS_ERR(folio))
 		return;
 
 	start = start & ~huge_page_mask(h);
fs/iomap/buffered-io.c

@@ -468,19 +468,12 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
 {
 	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
-	struct folio *folio;
 
 	if (iter->flags & IOMAP_NOWAIT)
 		fgp |= FGP_NOWAIT;
 
-	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
 			fgp, mapping_gfp_mask(iter->inode->i_mapping));
-	if (folio)
-		return folio;
-
-	if (iter->flags & IOMAP_NOWAIT)
-		return ERR_PTR(-EAGAIN);
-	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(iomap_get_folio);

@@ -911,7 +904,7 @@ static int iomap_write_delalloc_scan(struct inode *inode,
 		/* grab locked page */
 		folio = filemap_lock_folio(inode->i_mapping,
 				start_byte >> PAGE_SHIFT);
-		if (!folio) {
+		if (IS_ERR(folio)) {
 			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
 					PAGE_SIZE;
 			continue;
fs/netfs/buffered_read.c

@@ -350,8 +350,8 @@ int netfs_write_begin(struct netfs_inode *ctx,
 retry:
 	folio = __filemap_get_folio(mapping, index, fgp_flags,
 				    mapping_gfp_mask(mapping));
-	if (!folio)
-		return -ENOMEM;
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
 	if (ctx->ops->check_write_begin) {
 		/* Allow the netfs (eg. ceph) to flush conflicts. */
fs/nfs/file.c

@@ -336,8 +336,8 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 
 start:
 	folio = nfs_folio_grab_cache_write_begin(mapping, pos >> PAGE_SHIFT);
-	if (!folio)
-		return -ENOMEM;
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 	*pagep = &folio->page;
 
 	ret = nfs_flush_incompatible(file, folio);
fs/nilfs2/page.c

@@ -259,10 +259,10 @@ repeat:
 			NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");
 
 		dfolio = filemap_grab_folio(dmap, folio->index);
-		if (unlikely(!dfolio)) {
+		if (unlikely(IS_ERR(dfolio))) {
 			/* No empty page is added to the page cache */
-			err = -ENOMEM;
 			folio_unlock(folio);
+			err = PTR_ERR(dfolio);
 			break;
 		}
 		if (unlikely(!folio_buffers(folio)))

@@ -311,7 +311,7 @@ repeat:
 
 		folio_lock(folio);
 		dfolio = filemap_lock_folio(dmap, index);
-		if (dfolio) {
+		if (!IS_ERR(dfolio)) {
 			/* overwrite existing folio in the destination cache */
 			WARN_ON(folio_test_dirty(dfolio));
 			nilfs_copy_page(&dfolio->page, &folio->page, 0);
include/linux/pagemap.h

@@ -520,7 +520,8 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
  * Looks up the page cache entry at @mapping & @index. If a folio is
  * present, it is returned with an increased refcount.
  *
- * Otherwise, %NULL is returned.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
  */
 static inline struct folio *filemap_get_folio(struct address_space *mapping,
 					pgoff_t index)

@@ -537,8 +538,8 @@ static inline struct folio *filemap_get_folio(struct address_space *mapping,
  * present, it is returned locked with an increased refcount.
  *
  * Context: May sleep.
- * Return: A folio or %NULL if there is no folio in the cache for this
- * index. Will not return a shadow, swap or DAX entry.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
  */
 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
 					pgoff_t index)

@@ -555,8 +556,8 @@ static inline struct folio *filemap_lock_folio(struct address_space *mapping,
  * a new folio is created. The folio is locked, marked as accessed, and
  * returned.
  *
- * Return: A found or created folio. NULL if no folio is found and failed to
- * create a folio.
+ * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
+ * and failed to create a folio.
  */
 static inline struct folio *filemap_grab_folio(struct address_space *mapping,
 					pgoff_t index)
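With the kernel-doc above, each wrapper now has a distinct failure value:
ERR_PTR(-ENOENT) for the plain lookups, ERR_PTR(-ENOMEM) for
filemap_grab_folio().  A hedged sketch of a filemap_grab_folio() caller
under the new contract (the surrounding function and its use of the folio
are hypothetical):

	struct folio *folio = filemap_grab_folio(mapping, index);

	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* -ENOMEM: not found and allocation failed */
	/* folio is locked, marked accessed, and holds a reference */
	/* ... use the folio ... */
	folio_unlock(folio);
	folio_put(folio);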
mm/filemap.c (14 lines changed)

@@ -1907,7 +1907,7 @@ out:
  *
  * If there is a page cache page, it is returned with an increased refcount.
  *
- * Return: The found folio or %NULL otherwise.
+ * Return: The found folio or an ERR_PTR() otherwise.
  */
 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		int fgp_flags, gfp_t gfp)

@@ -1925,7 +1925,7 @@ repeat:
 		if (fgp_flags & FGP_NOWAIT) {
 			if (!folio_trylock(folio)) {
 				folio_put(folio);
-				return NULL;
+				return ERR_PTR(-EAGAIN);
 			}
 		} else {
 			folio_lock(folio);

@@ -1964,7 +1964,7 @@ no_page:
 
 		folio = filemap_alloc_folio(gfp, 0);
 		if (!folio)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 
 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;

@@ -1989,6 +1989,8 @@ no_page:
 			folio_unlock(folio);
 	}
 
+	if (!folio)
+		return ERR_PTR(-ENOENT);
 	return folio;
 }
 EXPORT_SYMBOL(__filemap_get_folio);

@@ -3258,7 +3260,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	 * Do we have something in the page cache already?
 	 */
 	folio = filemap_get_folio(mapping, index);
-	if (likely(folio)) {
+	if (likely(!IS_ERR(folio))) {
 		/*
 		 * We found the page, so try async readahead before waiting for
 		 * the lock.

@@ -3287,7 +3289,7 @@ retry_find:
 	folio = __filemap_get_folio(mapping, index,
 				  FGP_CREAT|FGP_FOR_MMAP,
 				  vmf->gfp_mask);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		if (fpin)
 			goto out_retry;
 		filemap_invalidate_unlock_shared(mapping);

@@ -3638,7 +3640,7 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
 		filler = mapping->a_ops->read_folio;
 repeat:
 	folio = filemap_get_folio(mapping, index);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		folio = filemap_alloc_folio(gfp, 0);
 		if (!folio)
 			return ERR_PTR(-ENOMEM);
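Taken together, the mm/filemap.c hunks above mean __filemap_get_folio()
never returns NULL any more; every failure is a distinct ERR_PTR.  Roughly,
as a caller might annotate it (a summary sketch, not code from the patch):

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (IS_ERR(folio)) {
		err = PTR_ERR(folio);
		/*
		 * -ENOENT: no folio in the cache and FGP_CREAT was not set
		 * -EAGAIN: FGP_NOWAIT was set and the lookup would have blocked
		 * -ENOMEM: FGP_CREAT was set but allocating a folio failed
		 */
		return err;
	}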
mm/folio-compat.c

@@ -97,7 +97,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
 	struct folio *folio;
 
 	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
-	if (!folio)
+	if (IS_ERR(folio))
 		return NULL;
 	return folio_file_page(folio, index);
 }
mm/huge_memory.c

@@ -3092,7 +3092,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 		struct folio *folio = filemap_get_folio(mapping, index);
 
 		nr_pages = 1;
-		if (!folio)
+		if (IS_ERR(folio))
 			continue;
 
 		if (!folio_test_large(folio))
mm/hugetlb.c

@@ -5780,7 +5780,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	 */
 	new_folio = false;
 	folio = filemap_lock_folio(mapping, idx);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		size = i_size_read(mapping->host) >> huge_page_shift(h);
 		if (idx >= size)
 			goto out;

@@ -6071,6 +6071,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma_end_reservation(h, vma, haddr);
 
 		pagecache_folio = filemap_lock_folio(mapping, idx);
+		if (IS_ERR(pagecache_folio))
+			pagecache_folio = NULL;
 	}
 
 	ptl = huge_pte_lock(h, mm, ptep);

@@ -6182,7 +6184,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (is_continue) {
 		ret = -EFAULT;
 		folio = filemap_lock_folio(mapping, idx);
-		if (!folio)
+		if (IS_ERR(folio))
 			goto out;
 		folio_in_pagecache = true;
 	} else if (!*pagep) {
mm/memcontrol.c

@@ -5705,7 +5705,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 	/* shmem/tmpfs may report page out on swap: account for that too. */
 	index = linear_page_index(vma, addr);
 	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
-	if (!folio)
+	if (IS_ERR(folio))
 		return NULL;
 	return folio_file_page(folio, index);
 }
mm/mincore.c

@@ -61,7 +61,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
 	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
 	 */
 	folio = filemap_get_incore_folio(mapping, index);
-	if (folio) {
+	if (!IS_ERR(folio)) {
 		present = folio_test_uptodate(folio);
 		folio_put(folio);
 	}
mm/shmem.c

@@ -605,7 +605,7 @@ next:
 
 			index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
 			folio = filemap_get_folio(inode->i_mapping, index);
-			if (!folio)
+			if (IS_ERR(folio))
 				goto drop;
 
 			/* No huge page at the end of the file: nothing to split */

@@ -3214,7 +3214,7 @@ static const char *shmem_get_link(struct dentry *dentry,
 
 	if (!dentry) {
 		folio = filemap_get_folio(inode->i_mapping, 0);
-		if (!folio)
+		if (IS_ERR(folio))
 			return ERR_PTR(-ECHILD);
 		if (PageHWPoison(folio_page(folio, 0)) ||
 		    !folio_test_uptodate(folio)) {
mm/swap_state.c

@@ -336,7 +336,7 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
 	struct folio *folio;
 
 	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
-	if (folio) {
+	if (!IS_ERR(folio)) {
 		bool vma_ra = swap_use_vma_readahead();
 		bool readahead;
 

@@ -366,6 +366,8 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
 			if (!vma || !vma_ra)
 				atomic_inc(&swapin_readahead_hits);
 		}
+	} else {
+		folio = NULL;
 	}
 
 	return folio;

@@ -388,23 +390,24 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
 	struct swap_info_struct *si;
 	struct folio *folio = filemap_get_entry(mapping, index);
 
+	if (!folio)
+		return ERR_PTR(-ENOENT);
 	if (!xa_is_value(folio))
-		goto out;
+		return folio;
 	if (!shmem_mapping(mapping))
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
 	swp = radix_to_swp_entry(folio);
 	/* There might be swapin error entries in shmem mapping. */
 	if (non_swap_entry(swp))
-		return NULL;
+		return ERR_PTR(-ENOENT);
 	/* Prevent swapoff from happening to us */
 	si = get_swap_device(swp);
 	if (!si)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 	index = swp_offset(swp);
 	folio = filemap_get_folio(swap_address_space(swp), index);
 	put_swap_device(si);
-out:
 	return folio;
 }

@@ -431,7 +434,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	folio = filemap_get_folio(swap_address_space(entry),
 					swp_offset(entry));
 	put_swap_device(si);
-	if (folio)
+	if (!IS_ERR(folio))
 		return folio_file_page(folio, swp_offset(entry));
 
 	/*
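Note the boundary pattern visible in mm/swap_state.c: swap_cache_get_folio()
keeps its historical NULL-on-miss contract by folding the error pointer back
to NULL for its existing callers, while filemap_get_incore_folio() adopts the
new convention and propagates ERR_PTR(-ENOENT).  In miniature (illustrative
only, extracted from the hunks above):

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (IS_ERR(folio))
		folio = NULL;	/* preserve the old NULL-means-miss interface */
	return folio;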
mm/swapfile.c

@@ -136,7 +136,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
 	int ret = 0;
 
 	folio = filemap_get_folio(swap_address_space(entry), offset);
-	if (!folio)
+	if (IS_ERR(folio))
 		return 0;
 	/*
 	 * When this function is called from scan_swap_map_slots() and it's

@@ -2095,7 +2095,7 @@ retry:
 
 		entry = swp_entry(type, i);
 		folio = filemap_get_folio(swap_address_space(entry), i);
-		if (!folio)
+		if (IS_ERR(folio))
 			continue;
 
 		/*
mm/truncate.c

@@ -375,7 +375,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
-	if (folio) {
+	if (!IS_ERR(folio)) {
 		same_folio = lend < folio_pos(folio) + folio_size(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio->index + folio_nr_pages(folio);

@@ -387,14 +387,15 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		folio = NULL;
 	}
 
-	if (!same_folio)
+	if (!same_folio) {
 		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
 						FGP_LOCK, 0);
-	if (folio) {
+		if (!IS_ERR(folio)) {
 			if (!truncate_inode_partial_folio(folio, lstart, lend))
 				end = folio->index;
 			folio_unlock(folio);
 			folio_put(folio);
+		}
 	}
 
 	index = start;