mm/truncate: add folio_unmap_invalidate() helper
Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Embed the
old invalidate_complete_folio2() helper in there as well, as nobody
else calls it.

Use this new helper in invalidate_inode_pages2_range(), rather than
duplicate the code there.

In preparation for using this elsewhere as well, have it take a gfp_t
mask rather than assume GFP_KERNEL is the right choice. This bubbles
back to invalidate_complete_folio2() as well.

Link: https://lkml.kernel.org/r/20241220154831.1086649-7-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Cc: Brian Foster <bfoster@redhat.com>
Cc: Chris Mason <clm@meta.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 77d075221a
commit 4a9e23159f

2 changed files with 30 additions and 25 deletions
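
For illustration, a minimal sketch of how a caller elsewhere in mm might use
the new helper; the wrapper try_invalidate_folio() is hypothetical and not
part of this patch. The helper requires the folio to be locked (it asserts
this with VM_BUG_ON_FOLIO) and returns a negative errno such as -EBUSY when
the folio cannot be freed:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include "internal.h"	/* folio_unmap_invalidate() */

/*
 * Hypothetical wrapper, not part of this patch: lock a folio, then try
 * to unmap it and drop it from the page cache. Dirty folios are left
 * alone (return 0); a folio whose private data cannot be released, or
 * which was truncated away concurrently, yields -EBUSY.
 */
static int try_invalidate_folio(struct address_space *mapping,
				struct folio *folio, gfp_t gfp)
{
	int ret;

	folio_lock(folio);
	ret = folio_unmap_invalidate(mapping, folio, gfp);
	folio_unlock(folio);
	return ret;
}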
mm/internal.h
@@ -392,6 +392,8 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp);
 
 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
 		unsigned int order);
mm/truncate.c
@@ -525,6 +525,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+	if (!folio_test_dirty(folio))
+		return 0;
+	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+		return 0;
+	return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount. We do this because invalidate_inode_pages2() needs stronger
@@ -532,14 +541,26 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
-static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp)
 {
-	if (folio->mapping != mapping)
-		return 0;
+	int ret;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
+	if (folio_test_dirty(folio))
 		return 0;
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (ret)
+		return ret;
+	if (folio->mapping != mapping)
+		return -EBUSY;
+	if (!filemap_release_folio(folio, gfp))
+		return -EBUSY;
 
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
@@ -558,16 +579,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
 failed:
 	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&mapping->host->i_lock);
-	return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-	if (!folio_test_dirty(folio))
-		return 0;
-	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-		return 0;
-	return mapping->a_ops->launder_folio(folio);
+	return -EBUSY;
 }
 
 /**
@@ -631,16 +643,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			}
 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);
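
invalidate_inode_pages2_range() preserves its old behaviour by passing
GFP_KERNEL. The point of the new gfp argument is that future callers need
not: filemap_release_folio() forwards the mask to the filesystem's
->release_folio, which may consult it when deciding whether it can block to
free private data. A hedged sketch of such a hypothetical caller
(evict_folio_nowait() is not part of this patch):

/*
 * Hypothetical, not part of this patch: a best-effort attempt to evict
 * an already-locked folio while asking filemap_release_folio() not to
 * block (GFP_NOWAIT instead of the previously hardcoded GFP_KERNEL).
 * Note that laundering and unmapping may still take time; only the
 * release of private data is constrained by the mask.
 */
static int evict_folio_nowait(struct address_space *mapping,
			      struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	return folio_unmap_invalidate(mapping, folio, GFP_NOWAIT);
}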