mm/page_alloc: place pages to tail in __free_pages_core()
__free_pages_core() is used when exposing fresh memory to the buddy during
system boot and when onlining memory in generic_online_page().

generic_online_page() is used in two cases:

1. Direct memory onlining in online_pages().
2. Deferred memory onlining in memory-ballooning-like mechanisms (Hyper-V
   balloon and virtio-mem), when parts of a section are kept fake-offline
   to be fake-onlined later on.

In 1, we already place pages to the tail of the freelist. Pages will be
freed to MIGRATE_ISOLATE lists first and moved to the tail of the
freelists via undo_isolate_page_range().

In 2, we currently don't implement a proper rule. In case of virtio-mem,
where we currently always online MAX_ORDER - 1 pages, the pages will be
placed to the HEAD of the freelist - undesirable. While the Hyper-V
balloon calls generic_online_page() with single pages, usually it will
call it on successive single pages in a larger block.

The pages are fresh, so place them to the tail of the freelist and avoid
the PCP. In __free_pages_core(), remove the now superfluous call to
set_page_refcounted() and add a comment regarding page initialization and
the refcount.

Note: In 2, we currently don't shuffle. If ever relevant (page shuffling
is usually of limited use in virtualized environments), we might want to
shuffle after a sequence of generic_online_page() calls in the relevant
callers.

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Scott Cheloha <cheloha@linux.ibm.com>
Link: https://lkml.kernel.org/r/20201005121534.15649-5-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7fef431be9
parent 293ffa5ebb

1 file changed, 23 insertions(+), 10 deletions(-)
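For orientation before the diff: the head-vs-tail decision the commit
message describes is made in __free_one_page(), driven by the FPI_TO_TAIL
flag introduced earlier in this series. The fragment below is a sketch of
that decision point, not a hunk of this patch; the helper names
(is_shuffle_order(), shuffle_pick_tail(), buddy_merge_likely(),
add_to_free_list_tail()) are taken from the mm/page_alloc.c of this era
and should be read as assumptions here:

	/* In __free_one_page(): decide head vs. tail placement (sketch). */
	if (fpi_flags & FPI_TO_TAIL)		/* fresh pages: force the tail */
		to_tail = true;
	else if (is_shuffle_order(order))	/* randomized placement */
		to_tail = shuffle_pick_tail();
	else					/* heuristic: merge likely soon? */
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

Because allocations are served from the head of the freelist, tail
placement keeps the freshly onlined pages unused for as long as possible.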
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -275,7 +275,8 @@ bool pm_suspended_storage(void)
 unsigned int pageblock_order __read_mostly;
 #endif
 
-static void __free_pages_ok(struct page *page, unsigned int order);
+static void __free_pages_ok(struct page *page, unsigned int order,
+			    fpi_t fpi_flags);
 
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
@@ -687,7 +688,7 @@ out:
 void free_compound_page(struct page *page)
 {
 	mem_cgroup_uncharge(page);
-	__free_pages_ok(page, compound_order(page));
+	__free_pages_ok(page, compound_order(page), FPI_NONE);
 }
 
 void prep_compound_page(struct page *page, unsigned int order)
@@ -1423,14 +1424,14 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 static void free_one_page(struct zone *zone,
 				struct page *page, unsigned long pfn,
 				unsigned int order,
-				int migratetype)
+				int migratetype, fpi_t fpi_flags)
 {
 	spin_lock(&zone->lock);
 	if (unlikely(has_isolate_pageblock(zone) ||
 		     is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
-	__free_one_page(page, pfn, zone, order, migratetype, FPI_NONE);
+	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
 	spin_unlock(&zone->lock);
 }
 
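With free_one_page() now taking an fpi_t, the flag threads all the way
from the top-level caller down to the freelist insertion. A comment-only
sketch of the resulting chain for the memory-onlining case, composed from
the hunks of this diff (the final add_to_free_list_tail() step is an
assumption, see the sketch above):

/*
 * __free_pages_core(page, order)
 *  -> __free_pages_ok(page, order, FPI_TO_TAIL)
 *      -> free_one_page(zone, page, pfn, order, migratetype, FPI_TO_TAIL)
 *          -> __free_one_page(page, pfn, zone, order, migratetype, FPI_TO_TAIL)
 *              -> add_to_free_list_tail(page, zone, order, migratetype)
 */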
@@ -1508,7 +1509,8 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 	}
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static void __free_pages_ok(struct page *page, unsigned int order,
+			    fpi_t fpi_flags)
 {
 	unsigned long flags;
 	int migratetype;
@@ -1520,7 +1522,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
-	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	free_one_page(page_zone(page), page, pfn, order, migratetype,
+		      fpi_flags);
 	local_irq_restore(flags);
 }
 
@@ -1530,6 +1533,11 @@ void __free_pages_core(struct page *page, unsigned int order)
 	struct page *p = page;
 	unsigned int loop;
 
+	/*
+	 * When initializing the memmap, __init_single_page() sets the refcount
+	 * of all pages to 1 ("allocated"/"not free"). We have to set the
+	 * refcount of all involved pages to 0.
+	 */
 	prefetchw(p);
 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
 		prefetchw(p + 1);
@@ -1540,8 +1548,12 @@ void __free_pages_core(struct page *page, unsigned int order)
 	set_page_count(p, 0);
 
 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
-	set_page_refcounted(page);
-	__free_pages(page, order);
+
+	/*
+	 * Bypass PCP and place fresh pages right to the tail, primarily
+	 * relevant for memory onlining.
+	 */
+	__free_pages_ok(page, order, FPI_TO_TAIL);
 }
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
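The hunk above also shows why set_page_refcounted() became superfluous: it
only raised the head page's refcount from 0 back to 1 so that
__free_pages() (unchanged, visible at the end of this diff) could drop it
again and then free the page, via the PCP for order-0. A before/after
sketch under that reading; put_page_testzero() as the usual refcount-drop
helper is an assumption here, not part of this diff:

/*
 * Old flow in __free_pages_core():
 *	set_page_count(p, 0) on every page;	// refcounts now 0
 *	set_page_refcounted(page);		// head page: 0 -> 1
 *	__free_pages(page, order);		// put_page_testzero(): 1 -> 0,
 *						// then free, PCP for order-0
 *
 * New flow:
 *	set_page_count(p, 0) on every page;	// refcounts stay 0
 *	__free_pages_ok(page, order, FPI_TO_TAIL);	// no PCP, tail placement
 */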
@@ -3168,7 +3180,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	 */
 	if (migratetype >= MIGRATE_PCPTYPES) {
 		if (unlikely(is_migrate_isolate(migratetype))) {
-			free_one_page(zone, page, pfn, 0, migratetype);
+			free_one_page(zone, page, pfn, 0, migratetype,
+				      FPI_NONE);
 			return;
 		}
 		migratetype = MIGRATE_MOVABLE;
@@ -4991,7 +5004,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
 	if (order == 0)		/* Via pcp? */
 		free_unref_page(page);
 	else
-		__free_pages_ok(page, order);
+		__free_pages_ok(page, order, FPI_NONE);
 }
 
 void __free_pages(struct page *page, unsigned int order)