mm/memory_hotplug: introduce add_pages
There are new users of memory hotplug emerging. Some of them require a
different subset of arch_add_memory. There are some which only require
allocation of struct pages without mapping those pages into the kernel
address space. We currently have __add_pages for that purpose, but it is
rather low level and not very suitable for code outside of memory hotplug,
e.g. x86_64 wants to update max_pfn, which should be done by the caller.

Introduce add_pages(), which takes care of those details when they are
needed. Each architecture that needs them should define its own
implementation and select CONFIG_ARCH_HAS_ADD_PAGES. All others fall back
to the currently existing __add_pages.

Link: http://lkml.kernel.org/r/20170817000548.32038-7-jglisse@redhat.com
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Sherry Cheung <SCheung@nvidia.com>
Cc: Subhash Gutti <sgutti@nvidia.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 74eee180b9
commit 3072e413e3
3 changed files with 30 additions and 7 deletions
arch/x86/Kconfig
@@ -2323,6 +2323,10 @@ source "kernel/livepatch/Kconfig"
 
 endmenu
 
+config ARCH_HAS_ADD_PAGES
+	def_bool y
+	depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
+
 config ARCH_ENABLE_MEMORY_HOTPLUG
 	def_bool y
 	depends on X86_64 || (X86_32 && HIGHMEM)
arch/x86/mm/init_64.c
@@ -761,7 +761,7 @@ void __init paging_init(void)
  * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
  * updating.
  */
-static void  update_end_of_memory_vars(u64 start, u64 size)
+static void update_end_of_memory_vars(u64 start, u64 size)
 {
 	unsigned long end_pfn = PFN_UP(start + size);
 
@@ -772,22 +772,30 @@ static void update_end_of_memory_vars(u64 start, u64 size)
 	}
 }
 
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int add_pages(int nid, unsigned long start_pfn,
+	      unsigned long nr_pages, bool want_memblock)
 {
-	unsigned long start_pfn = start >> PAGE_SHIFT;
-	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	init_memory_mapping(start, start + size);
-
 	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
 	WARN_ON_ONCE(ret);
 
 	/* update max_pfn, max_low_pfn and high_memory */
-	update_end_of_memory_vars(start, size);
+	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+				  nr_pages << PAGE_SHIFT);
 
 	return ret;
 }
+
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+
+	init_memory_mapping(start, start + size);
+
+	return add_pages(nid, start_pfn, nr_pages, want_memblock);
+}
 EXPORT_SYMBOL_GPL(arch_add_memory);
 
 #define PAGE_INUSE 0xFD
include/linux/memory_hotplug.h
@@ -133,6 +133,17 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
 extern int __add_pages(int nid, unsigned long start_pfn,
 		unsigned long nr_pages, bool want_memblock);
 
+#ifndef CONFIG_ARCH_HAS_ADD_PAGES
+static inline int add_pages(int nid, unsigned long start_pfn,
+		unsigned long nr_pages, bool want_memblock)
+{
+	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
+}
+#else /* ARCH_HAS_ADD_PAGES */
+int add_pages(int nid, unsigned long start_pfn,
+		unsigned long nr_pages, bool want_memblock);
+#endif /* ARCH_HAS_ADD_PAGES */
+
 #ifdef CONFIG_NUMA
 extern int memory_add_physaddr_to_nid(u64 start);
 #else
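
For illustration only (not part of this commit): a minimal sketch of how a caller that only needs struct pages for a physical range, without touching the kernel linear mapping, could use the new add_pages() instead of arch_add_memory(). The helper name example_add_device_pages and its caller are hypothetical; resource reservation, section alignment checks, and error handling are omitted, and the locking assumes the mem_hotplug_begin()/mem_hotplug_done() convention used by memory hotplug at the time.

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

/* Hypothetical helper: allocate struct pages for [start, start + size). */
static int example_add_device_pages(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	mem_hotplug_begin();
	/* want_memblock == false: no memory block devices for this range */
	ret = add_pages(nid, start_pfn, nr_pages, false);
	mem_hotplug_done();

	return ret;
}

On x86_64 (which selects CONFIG_ARCH_HAS_ADD_PAGES), such a call still updates max_pfn, max_low_pfn and high_memory via update_end_of_memory_vars(); on all other architectures it falls back to the plain __add_pages() wrapper declared above.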