
mm: separate move/undo parts from migrate_pages_batch()

Functionally, no change.  This is a preparation for the luf mechanism,
which requires separate folio lists for its own handling during
migration.  Refactor migrate_pages_batch() so that the move/undo parts
are split out into their own helpers.

Link: https://lkml.kernel.org/r/20250115103403.11882-1-byungchul@sk.com
Signed-off-by: Byungchul Park <byungchul@sk.com>
Reviewed-by: Shivank Garg <shivankg@amd.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Byungchul Park 2024-08-08 15:53:58 +09:00 committed by Andrew Morton
parent ff9b7e0b17
commit f752e677f8
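
For orientation, here is the overall shape of the change, condensed from the hunks below: the patch adds two helpers and has migrate_pages_batch() call them where it previously open-coded the move and undo loops.  This is only a reading aid assembled from the diff itself, with bodies and unrelated arguments elided ("..."); it is not a compilable excerpt.

/* New helpers factored out of migrate_pages_batch(); bodies elided. */
static void migrate_folios_move(struct list_head *src_folios,
		struct list_head *dst_folios,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason,
		struct list_head *ret_folios,
		struct migrate_pages_stats *stats,
		int *retry, int *thp_retry, int *nr_failed,
		int *nr_retry_pages);

static void migrate_folios_undo(struct list_head *src_folios,
		struct list_head *dst_folios,
		free_folio_t put_new_folio, unsigned long private,
		struct list_head *ret_folios);

static int migrate_pages_batch(struct list_head *from, ...)
{
	...
	/* Move the unmapped folios (was an open-coded loop) */
	migrate_folios_move(&unmap_folios, &dst_folios,
			put_new_folio, private, mode, reason,
			ret_folios, stats, &retry, &thp_retry,
			&nr_failed, &nr_retry_pages);
	...
out:
	/* Cleanup remaining folios (was an open-coded undo loop) */
	migrate_folios_undo(&unmap_folios, &dst_folios,
			put_new_folio, private, ret_folios);
	return rc;
}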

mm/migrate.c

@@ -1687,6 +1687,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 	return nr_failed;
 }
 
+static void migrate_folios_move(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		enum migrate_mode mode, int reason,
+		struct list_head *ret_folios,
+		struct migrate_pages_stats *stats,
+		int *retry, int *thp_retry, int *nr_failed,
+		int *nr_retry_pages)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+	bool is_thp;
+	int nr_pages;
+	int rc;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+		nr_pages = folio_nr_pages(folio);
+
+		cond_resched();
+
+		rc = migrate_folio_move(put_new_folio, private,
+				folio, dst, mode,
+				reason, ret_folios);
+		/*
+		 * The rules are:
+		 *	Success: folio will be freed
+		 *	-EAGAIN: stay on the unmap_folios list
+		 *	Other errno: put on ret_folios list
+		 */
+		switch (rc) {
+		case -EAGAIN:
+			*retry += 1;
+			*thp_retry += is_thp;
+			*nr_retry_pages += nr_pages;
+			break;
+		case MIGRATEPAGE_SUCCESS:
+			stats->nr_succeeded += nr_pages;
+			stats->nr_thp_succeeded += is_thp;
+			break;
+		default:
+			*nr_failed += 1;
+			stats->nr_thp_failed += is_thp;
+			stats->nr_failed_pages += nr_pages;
+			break;
+		}
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
+static void migrate_folios_undo(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		struct list_head *ret_folios)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		int old_page_state = 0;
+		struct anon_vma *anon_vma = NULL;
+
+		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+				anon_vma, true, ret_folios);
+		list_del(&dst->lru);
+		migrate_folio_undo_dst(dst, true, put_new_folio, private);
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
 /*
  * migrate_pages_batch() first unmaps folios in the from list as many as
  * possible, then move the unmapped folios.
@@ -1709,7 +1784,7 @@ static int migrate_pages_batch(struct list_head *from,
 	int pass = 0;
 	bool is_thp = false;
 	bool is_large = false;
-	struct folio *folio, *folio2, *dst = NULL, *dst2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
 	LIST_HEAD(dst_folios);
@@ -1880,42 +1955,11 @@ move:
 		thp_retry = 0;
 		nr_retry_pages = 0;
 
-		dst = list_first_entry(&dst_folios, struct folio, lru);
-		dst2 = list_next_entry(dst, lru);
-		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
-			nr_pages = folio_nr_pages(folio);
-
-			cond_resched();
-
-			rc = migrate_folio_move(put_new_folio, private,
-					folio, dst, mode,
-					reason, ret_folios);
-			/*
-			 * The rules are:
-			 *	Success: folio will be freed
-			 *	-EAGAIN: stay on the unmap_folios list
-			 *	Other errno: put on ret_folios list
-			 */
-			switch(rc) {
-			case -EAGAIN:
-				retry++;
-				thp_retry += is_thp;
-				nr_retry_pages += nr_pages;
-				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			default:
-				nr_failed++;
-				stats->nr_thp_failed += is_thp;
-				stats->nr_failed_pages += nr_pages;
-				break;
-			}
-			dst = dst2;
-			dst2 = list_next_entry(dst, lru);
-		}
+		/* Move the unmapped folios */
+		migrate_folios_move(&unmap_folios, &dst_folios,
+				put_new_folio, private, mode, reason,
+				ret_folios, stats, &retry, &thp_retry,
+				&nr_failed, &nr_retry_pages);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
@@ -1924,20 +1968,8 @@ move:
 	rc = rc_saved ? : nr_failed;
 out:
 	/* Cleanup remaining folios */
-	dst = list_first_entry(&dst_folios, struct folio, lru);
-	dst2 = list_next_entry(dst, lru);
-	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int old_page_state = 0;
-		struct anon_vma *anon_vma = NULL;
-
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
-				anon_vma, true, ret_folios);
-		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-		dst = dst2;
-		dst2 = list_next_entry(dst, lru);
-	}
+	migrate_folios_undo(&unmap_folios, &dst_folios,
+			put_new_folio, private, ret_folios);
 
 	return rc;
 }
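
A note on the iteration pattern both helpers use: the source and destination folios live on two parallel lists, and the successors (folio2/dst2) are fetched before the current pair is processed, so the current entries can be freed or moved off their lists mid-walk.  The stand-alone user-space program below is only an illustrative model of that lock-step, deletion-safe traversal, written under the assumption that plain malloc'd singly linked lists may stand in for the kernel's struct list_head; it is not kernel code and none of its names come from the patch.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static struct node *push(int id, struct node *next)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	n->next = next;
	return n;
}

int main(void)
{
	/* Two parallel lists, playing the roles of unmap_folios and dst_folios. */
	struct node *src = push(1, push(2, push(3, NULL)));
	struct node *dst = push(11, push(12, push(13, NULL)));
	struct node *s = src, *s2, *d = dst, *d2;

	while (s && d) {
		/*
		 * Grab the successors first, as list_for_each_entry_safe()
		 * and the dst/dst2 pair do, so s and d may be consumed here.
		 */
		s2 = s->next;
		d2 = d->next;

		printf("move src %d -> dst %d\n", s->id, d->id);
		free(s);	/* current pair is gone from its list */
		free(d);

		s = s2;
		d = d2;
	}
	return 0;
}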