mm: alloc_pages_bulk: rename API
The previous commit removed the page_list argument from
alloc_pages_bulk_noprof() along with the alloc_pages_bulk_list() function.

Now that only the *_array() flavour of the API remains, we can do the
following renaming (along with the _noprof() ones):

  alloc_pages_bulk_array -> alloc_pages_bulk
  alloc_pages_bulk_array_mempolicy -> alloc_pages_bulk_mempolicy
  alloc_pages_bulk_array_node -> alloc_pages_bulk_node

Link: https://lkml.kernel.org/r/275a3bbc0be20fbe9002297d60045e67ab3d4ada.1734991165.git.luizcap@redhat.com
Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c8b979530f
commit 6bf9b5b40a
17 changed files with 45 additions and 47 deletions
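Below is a minimal illustrative sketch (not part of this commit) of a caller using the renamed order-0 bulk API; the demo_bulk_alloc() helper is hypothetical. alloc_pages_bulk() fills the NULL slots of the supplied page array and returns the number of populated entries, so callers must cope with partial allocation, as the hunks that follow also show:

  /* Hypothetical example helper, not in the kernel tree. */
  #include <linux/gfp.h>
  #include <linux/mm.h>
  #include <linux/slab.h>

  static struct page **demo_bulk_alloc(unsigned long nr)
  {
  	struct page **pages;
  	unsigned long filled;

  	/* Slots must start out NULL; only empty entries get filled in. */
  	pages = kvcalloc(nr, sizeof(*pages), GFP_KERNEL);
  	if (!pages)
  		return NULL;

  	filled = alloc_pages_bulk(GFP_KERNEL, nr, pages);
  	if (filled != nr) {
  		/* Partial allocation: free what we got and bail out. */
  		while (filled)
  			__free_page(pages[--filled]);
  		kvfree(pages);
  		return NULL;
  	}
  	return pages;
  }

A NUMA-aware caller would use alloc_pages_bulk_node() or alloc_pages_bulk_mempolicy() instead, as the gfp.h hunk below shows.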
@@ -624,10 +624,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
 	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
 	int ret;
 
-	ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
+	ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
 	if (ret != bo->pgnr) {
 		free_pages_bulk_array(ret, bo->pages);
-		dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n");
+		dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
 		return -ENOMEM;
 	}

@@ -408,7 +408,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
 				  buf->dma_dir, 0);
 	}
 
-	/* Undo alloc_pages_bulk_array() */
+	/* Undo alloc_pages_bulk() */
 	for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
 		__free_page(sg_page_iter_page(&sg_iter));
 	sg_free_append_table(&buf->table);

@@ -431,8 +431,8 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
 		return -ENOMEM;
 
 	do {
-		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-						page_list);
+		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+					  page_list);
 		if (!filled) {
 			ret = -ENOMEM;
 			goto err;

@@ -1342,7 +1342,7 @@ static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
 {
 	int i;
 
-	/* Undo alloc_pages_bulk_array() */
+	/* Undo alloc_pages_bulk() */
 	for (i = 0; i < recv_buf->npages; i++)
 		__free_page(recv_buf->page_list[i]);
 

@@ -1361,9 +1361,9 @@ static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
 		return -ENOMEM;
 
 	for (;;) {
-		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
-						npages - done,
-						recv_buf->page_list + done);
+		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
+					  npages - done,
+					  recv_buf->page_list + done);
 		if (!filled)
 			goto err;
 

@@ -77,8 +77,8 @@ static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf,
 		return -ENOMEM;
 
 	do {
-		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-						page_list);
+		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+					  page_list);
 		if (!filled) {
 			ret = -ENOMEM;
 			goto err;

@@ -112,7 +112,7 @@ static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf)
 {
 	struct sg_page_iter sg_iter;
 
-	/* Undo alloc_pages_bulk_array() */
+	/* Undo alloc_pages_bulk() */
 	for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
 		__free_page(sg_page_iter_page(&sg_iter));
 	sg_free_append_table(&buf->table);

@@ -632,7 +632,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
 	for (allocated = 0; allocated < nr_pages;) {
 		unsigned int last = allocated;
 
-		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+		allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
 		if (unlikely(allocated == last)) {
 			/* No progress, fail and do cleanup. */
 			for (int i = 0; i < allocated; i++) {

@@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
 			tmp_pages[j] = gbuf->pages[j];
 		do {
 			last = j;
-			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
-						   tmp_pages);
+			j = alloc_pages_bulk(GFP_KERNEL, nrpages,
+					     tmp_pages);
 			if (last == j)
 				goto out;
 		} while (j != nrpages);

|
@ -342,7 +342,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
|
|||
return -ENOMEM;
|
||||
|
||||
pages = (struct page **)(bv + npages);
|
||||
npages = alloc_pages_bulk_array(GFP_USER, npages, pages);
|
||||
npages = alloc_pages_bulk(GFP_USER, npages, pages);
|
||||
if (!npages) {
|
||||
kfree(bv);
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -395,8 +395,8 @@ xfs_buf_alloc_pages(
|
|||
for (;;) {
|
||||
long last = filled;
|
||||
|
||||
filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
|
||||
bp->b_pages);
|
||||
filled = alloc_pages_bulk(gfp_mask, bp->b_page_count,
|
||||
bp->b_pages);
|
||||
if (filled == bp->b_page_count) {
|
||||
XFS_STATS_INC(bp->b_mount, xb_page_found);
|
||||
break;
|
||||
|
|
|
@@ -215,18 +215,18 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 				struct page **page_array);
 #define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
 				unsigned long nr_pages,
 				struct page **page_array);
-#define alloc_pages_bulk_array_mempolicy(...)	\
-	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_mempolicy(...)	\
+	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)	\
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array)	\
 	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
 
 static inline unsigned long
-alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
 				   struct page **page_array)
 {
 	if (nid == NUMA_NO_NODE)

@@ -235,8 +235,8 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
 	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
 }
 
-#define alloc_pages_bulk_array_node(...)	\
-	alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_node(...)	\
+	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
 
 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 {

|
@ -443,7 +443,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* zeroing is needed, since alloc_pages_bulk_array() only fills in non-zero entries */
|
||||
/* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */
|
||||
pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
|
||||
if (!pages)
|
||||
return 0;
|
||||
|
|
|
@ -420,8 +420,8 @@ static int vm_module_tags_populate(void)
|
|||
unsigned long nr;
|
||||
|
||||
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
|
||||
nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
|
||||
NUMA_NO_NODE, more_pages, next_page);
|
||||
nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
|
||||
NUMA_NO_NODE, more_pages, next_page);
|
||||
if (nr < more_pages ||
|
||||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
|
||||
next_page, PAGE_SHIFT) < 0) {
|
||||
|
|
|
@@ -57,7 +57,7 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
 	*ppages = pages;
 
-	got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+	got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
 	if (got != npages) {
 		release_pages(pages, got);
 		KUNIT_ASSERT_EQ(test, got, npages);

@@ -373,7 +373,7 @@ vm_map_ram_test(void)
 	if (!pages)
 		return -1;
 
-	nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+	nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages);
 	if (nr_allocated != map_nr_pages)
 		goto cleanup;
 

@@ -2372,7 +2372,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 }
 EXPORT_SYMBOL(folio_alloc_noprof);
 
-static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_interleave(gfp_t gfp,
 		struct mempolicy *pol, unsigned long nr_pages,
 		struct page **page_array)
 {

@@ -2407,7 +2407,7 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 	return total_allocated;
 }
 
-static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp,
 		struct mempolicy *pol, unsigned long nr_pages,
 		struct page **page_array)
 {

@@ -2522,7 +2522,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 	return total_allocated;
 }
 
-static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
+static unsigned long alloc_pages_bulk_preferred_many(gfp_t gfp, int nid,
 		struct mempolicy *pol, unsigned long nr_pages,
 		struct page **page_array)
 {

@@ -2548,7 +2548,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
  * It can accelerate memory allocation especially interleaving
  * allocate memory.
  */
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
 		unsigned long nr_pages, struct page **page_array)
 {
 	struct mempolicy *pol = &default_policy;

@@ -2559,15 +2559,15 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 		pol = get_task_policy(current);
 
 	if (pol->mode == MPOL_INTERLEAVE)
-		return alloc_pages_bulk_array_interleave(gfp, pol,
+		return alloc_pages_bulk_interleave(gfp, pol,
							 nr_pages, page_array);
 
 	if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
-		return alloc_pages_bulk_array_weighted_interleave(
+		return alloc_pages_bulk_weighted_interleave(
				gfp, pol, nr_pages, page_array);
 
 	if (pol->mode == MPOL_PREFERRED_MANY)
-		return alloc_pages_bulk_array_preferred_many(gfp,
+		return alloc_pages_bulk_preferred_many(gfp,
				numa_node_id(), pol, nr_pages, page_array);
 
 	nid = numa_node_id();

|
@ -3562,11 +3562,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
|
|||
* but mempolicy wants to alloc memory by interleaving.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
|
||||
nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
|
||||
nr = alloc_pages_bulk_mempolicy_noprof(gfp,
|
||||
nr_pages_request,
|
||||
pages + nr_allocated);
|
||||
else
|
||||
nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
|
||||
nr = alloc_pages_bulk_node_noprof(gfp, nid,
|
||||
nr_pages_request,
|
||||
pages + nr_allocated);
|
||||
|
||||
|
|
|
@@ -532,12 +532,11 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
 	if (unlikely(pool->alloc.count > 0))
 		return pool->alloc.cache[--pool->alloc.count];
 
-	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
+	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
 
-	nr_pages = alloc_pages_bulk_array_node(gfp,
-					       pool->p.nid, bulk,
-					       (struct page **)pool->alloc.cache);
+	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
+					 (struct page **)pool->alloc.cache);
 	if (unlikely(!nr_pages))
 		return 0;
 

@@ -651,8 +651,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 	if (pages > RPCSVC_MAXPAGES)
 		pages = RPCSVC_MAXPAGES;
 
-	ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
-					  rqstp->rq_pages);
+	ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages,
+				    rqstp->rq_pages);
 	return ret == pages;
 }
 

|
@ -671,8 +671,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
|
|||
}
|
||||
|
||||
for (filled = 0; filled < pages; filled = ret) {
|
||||
ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
|
||||
rqstp->rq_pages);
|
||||
ret = alloc_pages_bulk(GFP_KERNEL, pages, rqstp->rq_pages);
|
||||
if (ret > filled)
|
||||
/* Made progress, don't sleep yet */
|
||||
continue;
|
||||
|
|