1
0
Fork 0
Mirror repository; last synced 2025-03-06 20:59:54 +01:00

fs/proc/task_mmu: properly detect PM_MMAP_EXCLUSIVE per page of PMD-mapped THPs

We added PM_MMAP_EXCLUSIVE in 2015 via commit 77bb499bb6 ("pagemap: add
mmap-exclusive bit for marking pages mapped only here"), when THPs could
not be partially mapped and page_mapcount() returned a value that was
valid for every page of the THP.

In 2016, we added support for partially mapping THPs via commit
53f9263bab ("mm: rework mapcount accounting to enable 4k mapping of
THPs") but failed to also determine PM_MMAP_EXCLUSIVE on a per-page basis.

Checking page_mapcount() on the head page does not tell the whole story.

We should check each individual page.  In a future without per-page
mapcounts it will be different, but we'll change that to be consistent
with PTE-mapped THPs once we deal with that.

Link: https://lkml.kernel.org/r/20240607122357.115423-4-david@redhat.com
Fixes: 53f9263bab ("mm: rework mapcount accounting to enable 4k mapping of THPs")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lance Yang <ioworker0@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
David Hildenbrand 2024-06-07 14:23:54 +02:00 committed by Andrew Morton
parent da7f31ed0f
commit 2c1f057e5b

View file

@ -1477,6 +1477,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
ptl = pmd_trans_huge_lock(pmdp, vma); ptl = pmd_trans_huge_lock(pmdp, vma);
if (ptl) { if (ptl) {
unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
u64 flags = 0, frame = 0; u64 flags = 0, frame = 0;
pmd_t pmd = *pmdp; pmd_t pmd = *pmdp;
struct page *page = NULL; struct page *page = NULL;
@ -1493,8 +1494,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pmd_uffd_wp(pmd)) if (pmd_uffd_wp(pmd))
flags |= PM_UFFD_WP; flags |= PM_UFFD_WP;
if (pm->show_pfn) if (pm->show_pfn)
frame = pmd_pfn(pmd) + frame = pmd_pfn(pmd) + idx;
((addr & ~PMD_MASK) >> PAGE_SHIFT);
} }
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
else if (is_swap_pmd(pmd)) { else if (is_swap_pmd(pmd)) {
@ -1503,11 +1503,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pm->show_pfn) { if (pm->show_pfn) {
if (is_pfn_swap_entry(entry)) if (is_pfn_swap_entry(entry))
offset = swp_offset_pfn(entry); offset = swp_offset_pfn(entry) + idx;
else else
offset = swp_offset(entry); offset = swp_offset(entry) + idx;
offset = offset +
((addr & ~PMD_MASK) >> PAGE_SHIFT);
frame = swp_type(entry) | frame = swp_type(entry) |
(offset << MAX_SWAPFILES_SHIFT); (offset << MAX_SWAPFILES_SHIFT);
} }
@ -1523,12 +1521,16 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (page && !PageAnon(page)) if (page && !PageAnon(page))
flags |= PM_FILE; flags |= PM_FILE;
if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
for (; addr != end; addr += PAGE_SIZE) { for (; addr != end; addr += PAGE_SIZE, idx++) {
pagemap_entry_t pme = make_pme(frame, flags); unsigned long cur_flags = flags;
pagemap_entry_t pme;
if (page && (flags & PM_PRESENT) &&
page_mapcount(page + idx) == 1)
cur_flags |= PM_MMAP_EXCLUSIVE;
pme = make_pme(frame, cur_flags);
err = add_to_pagemap(&pme, pm); err = add_to_pagemap(&pme, pm);
if (err) if (err)
break; break;