mm/damon: Convert damon_pa_mkold() to use a folio
Ensure that we're passing the entire folio to rmap_walk().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
This commit is contained in:
parent
4eecb8b916
commit
6d42dba3cc
1 changed file with 9 additions and 7 deletions
|
@ -33,6 +33,7 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
|
||||||
|
|
||||||
static void damon_pa_mkold(unsigned long paddr)
|
static void damon_pa_mkold(unsigned long paddr)
|
||||||
{
|
{
|
||||||
|
struct folio *folio;
|
||||||
struct page *page = damon_get_page(PHYS_PFN(paddr));
|
struct page *page = damon_get_page(PHYS_PFN(paddr));
|
||||||
struct rmap_walk_control rwc = {
|
struct rmap_walk_control rwc = {
|
||||||
.rmap_one = __damon_pa_mkold,
|
.rmap_one = __damon_pa_mkold,
|
||||||
|
@ -42,23 +43,24 @@ static void damon_pa_mkold(unsigned long paddr)
|
||||||
|
|
||||||
if (!page)
|
if (!page)
|
||||||
return;
|
return;
|
||||||
|
folio = page_folio(page);
|
||||||
|
|
||||||
if (!page_mapped(page) || !page_rmapping(page)) {
|
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
|
||||||
set_page_idle(page);
|
folio_set_idle(folio);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
need_lock = !PageAnon(page) || PageKsm(page);
|
need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
|
||||||
if (need_lock && !trylock_page(page))
|
if (need_lock && !folio_trylock(folio))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
rmap_walk(page, &rwc);
|
rmap_walk(&folio->page, &rwc);
|
||||||
|
|
||||||
if (need_lock)
|
if (need_lock)
|
||||||
unlock_page(page);
|
folio_unlock(folio);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
put_page(page);
|
folio_put(folio);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
|
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
|
||||||
|
|
Loading…
Add table
Reference in a new issue