diff --git a/.mailmap b/.mailmap
index 1c007dec12d8..ae0adc499f4a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -150,7 +150,9 @@ Bryan Tan
 Cai Huoqing
 Can Guo
 Carl Huang
-Carlos Bilbao
+Carlos Bilbao
+Carlos Bilbao
+Carlos Bilbao
 Changbin Du
 Changbin Du
 Chao Yu
@@ -167,6 +169,7 @@ Christian Brauner
 Christian Brauner
 Christian Marangi
 Christophe Ricard
+Christopher Obbard
 Christoph Hellwig
 Chuck Lever
 Chuck Lever
@@ -263,6 +266,7 @@ Guo Ren
 Guru Das Srinagesh
 Gustavo Padovan
 Gustavo Padovan
+Hamza Mahfooz
 Hanjun Guo
 Hans Verkuil
 Hans Verkuil
@@ -763,6 +767,7 @@ Wolfram Sang
 Yakir Yang
 Yanteng Si
 Ying Huang
+Yosry Ahmed
 Yusuke Goda
 Zack Rusin
 Zhu Yanjun
diff --git a/Documentation/translations/sp_SP/index.rst b/Documentation/translations/sp_SP/index.rst
index aae7018b0d1a..2b50283e1608 100644
--- a/Documentation/translations/sp_SP/index.rst
+++ b/Documentation/translations/sp_SP/index.rst
@@ -7,7 +7,7 @@ Traducción al español
 \kerneldocCJKoff
 
-:maintainer: Carlos Bilbao
+:maintainer: Carlos Bilbao
 
 .. _sp_disclaimer:
diff --git a/MAINTAINERS b/MAINTAINERS
index 857b3014f853..896a307fa065 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1090,7 +1090,7 @@ F: drivers/video/fbdev/geode/
 AMD HSMP DRIVER
 M: Naveen Krishna Chatradhi
-R: Carlos Bilbao
+R: Carlos Bilbao
 L: platform-driver-x86@vger.kernel.org
 S: Maintained
 F: Documentation/arch/x86/amd_hsmp.rst
@@ -5857,7 +5857,7 @@ F: drivers/usb/atm/cxacru.c
 CONFIDENTIAL COMPUTING THREAT MODEL FOR X86 VIRTUALIZATION (SNP/TDX)
 M: Elena Reshetova
-M: Carlos Bilbao
+M: Carlos Bilbao
 S: Maintained
 F: Documentation/security/snp-tdx-threat-model.rst
@@ -11331,7 +11331,7 @@ S: Orphan
 F: drivers/video/fbdev/imsttfb.c
 INDEX OF FURTHER KERNEL DOCUMENTATION
-M: Carlos Bilbao
+M: Carlos Bilbao
 S: Maintained
 F: Documentation/process/kernel-docs.rst
@@ -22215,7 +22215,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
 F: drivers/media/dvb-frontends/sp2*
 SPANISH DOCUMENTATION
-M: Carlos Bilbao
+M: Carlos Bilbao
 R: Avadhut Naik
 S: Maintained
 F: Documentation/translations/sp_SP/
@@ -25739,11 +25739,13 @@ F: arch/x86/entry/vdso/
 XARRAY
 M: Matthew Wilcox
 L: linux-fsdevel@vger.kernel.org
+L: linux-mm@kvack.org
 S: Supported
 F: Documentation/core-api/xarray.rst
 F: include/linux/idr.h
 F: include/linux/xarray.h
 F: lib/idr.c
+F: lib/test_xarray.c
 F: lib/xarray.c
 F: tools/testing/radix-tree
@@ -26223,7 +26225,7 @@ K: zstd
 ZSWAP COMPRESSED SWAP CACHING
 M: Johannes Weiner
-M: Yosry Ahmed
+M: Yosry Ahmed
 M: Nhat Pham
 R: Chengming Zhou
 L: linux-mm@kvack.org
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 8acfa66e1095..dbf2ea561c85 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -626,6 +626,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 35e9a0872304..b0fd199cc0a4 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -583,6 +583,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 32891ddd3cc5..bb5b2d3b6c10 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -603,6 +603,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index ca276f0db3dd..8315a13bab73 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -575,6 +575,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index e83f14fe1a4f..350370657e5f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -585,6 +585,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 6b58be24da79..f942b4755702 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -602,6 +602,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 0e8d24f82565..b1eaad02efab 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -689,6 +689,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 24a7608c13ac..6309a4442bb3 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -575,6 +575,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index c415f75821f3..3feb0731f814 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -576,6 +576,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 2c715a8ff551..ea04b1b0da7d 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -592,6 +592,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 15ff37fcccbf..f52d9af92153 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -572,6 +572,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 40a44bf9f48d..f348447824da 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -573,6 +573,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index e9c46b59ebbc..465eb96c755e 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -448,6 +448,7 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 59fffe34c9d0..00ac0d7bb8c9 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -95,9 +95,13 @@ int __init fix_pxm_node_maps(int max_nid)
	int i, j, index = -1, count = 0;
	nodemask_t nodes_to_enable;
 
-	if (numa_off || srat_disabled())
+	if (numa_off)
		return -1;
 
+	/* no or incomplete node/PXM mapping set, nothing to do */
+	if (srat_disabled())
+		return 0;
+
	/* find fake nodes PXM mapping */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] != PXM_INVAL) {
@@ -117,6 +121,11 @@ int __init fix_pxm_node_maps(int max_nid)
			}
		}
	}
+	if (index == -1) {
+		pr_debug("No node/PXM mapping has been set\n");
+		/* nothing more to be done */
+		return 0;
+	}
	if (WARN(index != max_nid, "%d max nid when expected %d\n",
		 index, max_nid))
		return -1;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index e8015d24a82c..6613b8fcceb0 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -1186,7 +1186,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		if (size) {
			if (phys && blkphy << blkbits == phys + size) {
				/* The current extent goes on */
-				size += n << blkbits;
+				size += (u64)n << blkbits;
			} else {
				/* Terminate the current extent */
				ret = fiemap_fill_next_extent(
@@ -1199,14 +1199,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
-				size = n << blkbits;
+				size = (u64)n << blkbits;
			}
		} else {
			/* Start a new extent */
			flags = FIEMAP_EXTENT_MERGED;
			logical = blkoff << blkbits;
			phys = blkphy << blkbits;
-			size = n << blkbits;
+			size = (u64)n << blkbits;
		}
		blkoff += n;
	}
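The nilfs_fiemap() hunks above fix a shift-widening bug: n is a 32-bit quantity, so n << blkbits is evaluated in 32-bit arithmetic and can wrap before the result is ever assigned to the 64-bit size. Casting to u64 first makes the shift happen at full width. (The srat.c hunks earlier make a related robustness fix, distinguishing "NUMA is off" from "no node/PXM mapping to fix up".) A minimal userspace sketch of this overflow class, with illustrative values that are not taken from nilfs2:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int n = 3 << 20;	/* ~3M blocks in one extent */
	unsigned int blkbits = 12;	/* 4 KiB blocks */
	uint64_t bad, good;

	/* Shift is evaluated in 32 bits, then widened: high bits are lost. */
	bad = n << blkbits;
	/* Widen first, then shift: the full result fits in 64 bits. */
	good = (uint64_t)n << blkbits;

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}

With these values the unsafe form prints 0 while the widened form prints the true 12884901888-byte extent size, which is exactly the failure mode the casts prevent.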
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index e0b91dbaa0ac..8bb5022f3082 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2285,7 +2285,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
		mlog(ML_ERROR, "found superblock with incorrect block "
		     "size bits: found %u, should be 9, 10, 11, or 12\n",
		     blksz_bits);
-	} else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
+	} else if ((1 << blksz_bits) != blksz) {
		mlog(ML_ERROR, "found superblock with incorrect block "
		     "size: found %u, should be %u\n", 1 << blksz_bits, blksz);
	} else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a5f475335aea..b13b72645db3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -222,6 +222,7 @@ enum {
 };
 
 #define SWAP_CLUSTER_MAX 32UL
+#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 /* Bit flag in swap_map */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index e421a5f2ec7d..2ca797cbe465 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include /* check_stable_address_space */
 #include
@@ -1260,6 +1261,9 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
	 * returns NULL in find_active_uprobe_rcu().
	 */
	mmap_write_lock(mm);
+	if (check_stable_address_space(mm))
+		goto unlock;
+
	vma = find_vma(mm, info->vaddr);
	if (!vma || !valid_vma(vma, is_register) ||
	    file_inode(vma->vm_file) != uprobe->inode)
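The uprobes change above re-validates the target mm after taking mmap_write_lock(): registration can race with process exit, and the new check_stable_address_space() call makes the code back off instead of installing a breakpoint into a dying address space. The general shape, re-checking shared state after acquiring a lock because it may have changed while waiting, in a standalone userspace sketch (the pthread framing and all names here are illustrative assumptions, not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace analogue: re-validate state *after* taking the lock,
 * because it may have changed while we were waiting for it. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool unstable;	/* stands in for check_stable_address_space() */

static int try_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (unstable) {		/* owner began tearing down: back off */
		ret = -1;
		goto unlock;
	}
	/* ... safe to modify the shared structure here ... */
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("first attempt: %d\n", try_register());
	unstable = true;
	printf("after teardown began: %d\n", try_register());
	return 0;
}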
diff --git a/kernel/fork.c b/kernel/fork.c
index cba5ede2c639..735405a9c5f3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -760,7 +760,8 @@ loop_out:
		mt_set_in_rcu(vmi.mas.tree);
		ksm_fork(mm, oldmm);
		khugepaged_fork(mm, oldmm);
-	} else if (mpnt) {
+	} else {
+
		/*
		 * The entire maple tree has already been duplicated. If the
		 * mmap duplication fails, mark the failure point with
		 * stop releasing VMAs that have not been duplicated after this
		 * point.
		 */
-		mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
-		mas_store(&vmi.mas, XA_ZERO_ENTRY);
+		if (mpnt) {
+			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
+			mas_store(&vmi.mas, XA_ZERO_ENTRY);
+			/* Avoid OOM iterating a broken tree */
+			set_bit(MMF_OOM_SKIP, &mm->flags);
+		}
+		/*
+		 * The mm_struct is going to exit, but the locks will be dropped
+		 * first. Set the mm_struct as unstable is advisable as it is
+		 * not fully initialised.
+		 */
+		set_bit(MMF_UNSTABLE, &mm->flags);
	}
out:
	mmap_write_unlock(mm);
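The dup_mmap() hunk above tightens the failure path: a partially duplicated maple tree is marked at the failure point with XA_ZERO_ENTRY, MMF_OOM_SKIP keeps the OOM path from iterating the broken tree, and MMF_UNSTABLE advertises that the half-built mm is not fully initialised. The underlying idea, sentinel-marking a partial copy so the teardown path knows where to stop, can be shown in a small userspace sketch (all names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SENTINEL ((char *)-1)	/* plays the role of XA_ZERO_ENTRY */

/* Free a (possibly partially built) copy; stop at the sentinel. */
static void destroy(char **v, size_t n)
{
	for (size_t i = 0; i < n && v[i] != SENTINEL; i++)
		free(v[i]);
	free(v);
}

static char **dup_all(char **src, size_t n, size_t fail_at)
{
	char **dst = calloc(n, sizeof(*dst));

	for (size_t i = 0; dst && i < n; i++) {
		dst[i] = (i == fail_at) ? NULL : strdup(src[i]);
		if (!dst[i]) {		/* mark the failure point */
			dst[i] = SENTINEL;
			return dst;	/* caller sees a flagged copy */
		}
	}
	return dst;
}

int main(void)
{
	char *src[] = { "a", "b", "c", "d" };
	char **copy = dup_all(src, 4, 2);	/* simulate failure at 2 */

	destroy(copy, 4);	/* frees "a" and "b", stops at the sentinel */
	return 0;
}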
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 775966cf6114..1af972a92d06 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2456,22 +2456,8 @@ config TEST_BITMAP
 config TEST_UUID
	tristate "Test functions located in the uuid module at runtime"
 
-config XARRAY_KUNIT
-	tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS
-	depends on KUNIT
-	default KUNIT_ALL_TESTS
-	help
-	  Enable this option to test the Xarray code at boot.
-
-	  KUnit tests run during boot and output the results to the debug log
-	  in TAP format (http://testanything.org/). Only useful for kernel devs
-	  running the KUnit test harness, and not intended for inclusion into a
-	  production build.
-
-	  For more information on KUnit and unit tests in general please refer
-	  to the KUnit documentation in Documentation/dev-tools/kunit/.
-
-	  If unsure, say N.
+config TEST_XARRAY
+	tristate "Test the XArray code at runtime"
 
 config TEST_MAPLE_TREE
	tristate "Test the Maple Tree code at runtime or module load"
diff --git a/lib/Makefile b/lib/Makefile
index f1c6e9d76a7c..d5cfc7afbbb8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -94,6 +94,7 @@ GCOV_PROFILE_test_bitmap.o := n
 endif
 
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
 obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
@@ -372,7 +373,6 @@ CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
 obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
-obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
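The Kconfig and Makefile hunks above, together with the defconfig updates earlier in the patch, revert the XArray tests from a KUnit suite back to the long-standing CONFIG_TEST_XARRAY module: as the lib/test_xarray.c diff that follows shows, the file again counts every assertion in tests_run/tests_passed and its init function returns -EINVAL when any check failed. A userspace analogue of that self-counting harness pattern (the macro and names merely echo the module's style, they are not copied from it):

#include <stdio.h>

static unsigned int tests_run, tests_passed;

/* Count every assertion; report location on failure, tally on success. */
#define CHECK(x) do {						\
	tests_run++;						\
	if (!(x))						\
		printf("BUG at %s:%d: %s\n", __func__, __LINE__, #x); \
	else							\
		tests_passed++;					\
} while (0)

static void check_arithmetic(void)
{
	CHECK(1 + 1 == 2);
	CHECK(2 * 2 == 4);
}

int main(void)
{
	check_arithmetic();
	printf("%u of %u tests passed\n", tests_passed, tests_run);
	/* Nonzero exit mirrors the module returning -EINVAL on failure. */
	return tests_run == tests_passed ? 0 : 1;
}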
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index eab5971d0a48..6932a26f4927 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -6,10 +6,11 @@
  * Author: Matthew Wilcox
  */
 
-#include
-
-#include
 #include
+#include
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
 
 static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
@@ -19,12 +20,15 @@ static const unsigned int order_limit =
 void xa_dump(const struct xarray *xa) { }
 # endif
 #undef XA_BUG_ON
-#define XA_BUG_ON(xa, x) do {					\
-	if (x) {						\
-		KUNIT_FAIL(test, #x);				\
-		xa_dump(xa);					\
-		dump_stack();					\
-	}							\
+#define XA_BUG_ON(xa, x) do {					\
+	tests_run++;						\
+	if (x) {						\
+		printk("BUG at %s:%d\n", __func__, __LINE__);	\
+		xa_dump(xa);					\
+		dump_stack();					\
+	} else {						\
+		tests_passed++;					\
+	}							\
 } while (0)
 #endif
@@ -38,13 +42,13 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
	return xa_store(xa, index, xa_mk_index(index), gfp);
 }
 
-static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index)
+static void xa_insert_index(struct xarray *xa, unsigned long index)
 {
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
 }
 
-static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp)
+static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
	u32 id;
 
@@ -53,7 +57,7 @@ static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long
	XA_BUG_ON(xa, id != index);
 }
 
-static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index)
+static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 }
@@ -79,15 +83,8 @@ static void *xa_store_order(struct xarray *xa, unsigned long index,
	return curr;
 }
 
-static inline struct xarray *xa_param(struct kunit *test)
+static noinline void check_xa_err(struct xarray *xa)
 {
-	return *(struct xarray **)test->param_value;
-}
-
-static noinline void check_xa_err(struct kunit *test)
-{
-	struct xarray *xa = xa_param(test);
-
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
 #ifndef __KERNEL__
@@ -102,10 +99,8 @@ static noinline void check_xa_err(struct kunit *test)
 //	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
 }
 
-static noinline void check_xas_retry(struct kunit *test)
+static noinline void check_xas_retry(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	void *entry;
@@ -114,7 +109,7 @@ static noinline void check_xas_retry(struct kunit *test)
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
-	xa_erase_index(test, xa, 1);
+	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
@@ -145,14 +140,12 @@
	}
	xas_unlock(&xas);
-	xa_erase_index(test, xa, 0);
-	xa_erase_index(test, xa, 1);
+	xa_erase_index(xa, 0);
+	xa_erase_index(xa, 1);
 }
 
-static noinline void check_xa_load(struct kunit *test)
+static noinline void check_xa_load(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long i, j;
 
	for (i = 0; i < 1024; i++) {
@@ -174,15 +167,13 @@ static noinline void check_xa_load(struct kunit *test)
		else
			XA_BUG_ON(xa, entry);
		}
-		xa_erase_index(test, xa, i);
+		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_xa_mark_1(struct kunit *test, unsigned long index)
+static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
@@ -202,7 +193,7 @@ static noinline void check_xa_mark_1(struct kunit *test, unsigned long index)
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
 
	/* Storing NULL clears marks, and they can't be set again */
-	xa_erase_index(test, xa, index);
+	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
@@ -253,17 +244,15 @@
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
-		xa_erase_index(test, xa, index);
-		xa_erase_index(test, xa, next);
+		xa_erase_index(xa, index);
+		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_xa_mark_2(struct kunit *test)
+static noinline void check_xa_mark_2(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
@@ -300,11 +289,9 @@ static noinline void check_xa_mark_2(struct kunit *test)
	xa_destroy(xa);
 }
 
-static noinline void check_xa_mark_3(struct kunit *test)
+static noinline void check_xa_mark_3(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;
@@ -323,21 +310,19 @@
 #endif
 }
 
-static noinline void check_xa_mark(struct kunit *test)
+static noinline void check_xa_mark(struct xarray *xa)
 {
	unsigned long index;
 
	for (index = 0; index < 16384; index += 4)
-		check_xa_mark_1(test, index);
+		check_xa_mark_1(xa, index);
 
-	check_xa_mark_2(test);
-	check_xa_mark_3(test);
+	check_xa_mark_2(xa);
+	check_xa_mark_3(xa);
 }
 
-static noinline void check_xa_shrink(struct kunit *test)
+static noinline void check_xa_shrink(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
@@ -362,7 +347,7 @@ static noinline void check_xa_shrink(struct kunit *test)
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
-	xa_erase_index(test, xa, 0);
+	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	for (order = 0; order < max_order; order++) {
@@ -379,49 +364,45 @@
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
-		xa_erase_index(test, xa, ULONG_MAX);
+		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
-		xa_erase_index(test, xa, 0);
+		xa_erase_index(xa, 0);
	}
 }
 
-static noinline void check_insert(struct kunit *test)
+static noinline void check_insert(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long i;
 
	for (i = 0; i < 1024; i++) {
-		xa_insert_index(test, xa, i);
+		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
-		xa_erase_index(test, xa, i);
+		xa_erase_index(xa, i);
	}
 
	for (i = 10; i < BITS_PER_LONG; i++) {
-		xa_insert_index(test, xa, 1UL << i);
+		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
-		xa_erase_index(test, xa, 1UL << i);
+		xa_erase_index(xa, 1UL << i);
 
-		xa_insert_index(test, xa, (1UL << i) - 1);
+		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
-		xa_erase_index(test, xa, (1UL << i) - 1);
+		xa_erase_index(xa, (1UL << i) - 1);
	}
 
-	xa_insert_index(test, xa, ~0UL);
+	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
-	xa_erase_index(test, xa, ~0UL);
+	xa_erase_index(xa, ~0UL);
 
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_cmpxchg(struct kunit *test)
+static noinline void check_cmpxchg(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);
@@ -437,16 +418,14 @@
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
-	xa_erase_index(test, xa, 12345678);
-	xa_erase_index(test, xa, 5);
+	xa_erase_index(xa, 12345678);
+	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_cmpxchg_order(struct kunit *test)
+static noinline void check_cmpxchg_order(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	struct xarray *xa = xa_param(test);
-
	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;
@@ -497,10 +476,8 @@
 #endif
 }
 
-static noinline void check_reserve(struct kunit *test)
+static noinline void check_reserve(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	void *entry;
	unsigned long index;
	int count;
@@ -517,7 +494,7 @@
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
-	xa_erase_index(test, xa, 12345678);
+	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	/* cmpxchg sees a reserved entry as ZERO */
@@ -525,7 +502,7 @@
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
-	xa_erase_index(test, xa, 12345678);
+	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	/* xa_insert treats it as busy */
@@ -565,10 +542,8 @@
	xa_destroy(xa);
 }
 
-static noinline void check_xas_erase(struct kunit *test)
+static noinline void check_xas_erase(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;
@@ -606,11 +581,9 @@
 }
 
 #ifdef CONFIG_XARRAY_MULTI
-static noinline void check_multi_store_1(struct kunit *test, unsigned long index,
+static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);
@@ -629,15 +602,13 @@
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
-	xa_erase_index(test, xa, min);
+	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_multi_store_2(struct kunit *test, unsigned long index,
+static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
@@ -649,11 +620,9 @@
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_multi_store_3(struct kunit *test, unsigned long index,
+static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;
@@ -678,11 +647,9 @@
 }
 #endif
 
-static noinline void check_multi_store(struct kunit *test)
+static noinline void check_multi_store(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	struct xarray *xa = xa_param(test);
-
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
@@ -747,28 +714,26 @@
	}
 
	for (i = 0; i < 20; i++) {
-		check_multi_store_1(test, 200, i);
-		check_multi_store_1(test, 0, i);
-		check_multi_store_1(test, (1UL << i) + 1, i);
+		check_multi_store_1(xa, 200, i);
+		check_multi_store_1(xa, 0, i);
+		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
-	check_multi_store_2(test, 4095, 9);
+	check_multi_store_2(xa, 4095, 9);
 
	for (i = 1; i < 20; i++) {
-		check_multi_store_3(test, 0, i);
-		check_multi_store_3(test, 1UL << i, i);
+		check_multi_store_3(xa, 0, i);
+		check_multi_store_3(xa, 1UL << i, i);
	}
 #endif
 }
 
 #ifdef CONFIG_XARRAY_MULTI
 /* mimics page cache __filemap_add_folio() */
-static noinline void check_xa_multi_store_adv_add(struct kunit *test,
+static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
						  unsigned long index,
						  unsigned int order,
						  void *p)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;
@@ -796,12 +761,10 @@ static noinline void check_xa_multi_store_adv_add(struct kunit *test,
 }
 
 /* mimics page_cache_delete() */
-static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test,
+static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
							unsigned long index,
							unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, index);
 
	xas_set_order(&xas, index, order);
@@ -809,14 +772,12 @@
	xas_init_marks(&xas);
 }
 
-static noinline void check_xa_multi_store_adv_delete(struct kunit *test,
+static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
						     unsigned long index,
						     unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	xa_lock_irq(xa);
-	check_xa_multi_store_adv_del_entry(test, index, order);
+	check_xa_multi_store_adv_del_entry(xa, index, order);
	xa_unlock_irq(xa);
 }
@@ -853,12 +814,10 @@
 static unsigned long some_val = 0xdeadbeef;
 static unsigned long some_val_2 = 0xdeaddead;
 
 /* mimics the page cache usage */
-static noinline void check_xa_multi_store_adv(struct kunit *test,
+static noinline void check_xa_multi_store_adv(struct xarray *xa,
					      unsigned long pos,
					      unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;
@@ -868,7 +827,7 @@
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);
 
-	check_xa_multi_store_adv_add(test, base, order, &some_val);
+	check_xa_multi_store_adv_add(xa, base, order, &some_val);
 
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
@@ -876,20 +835,20 @@
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
 
	/* Use order 0 for the next item */
-	check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2);
+	check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
 
	/* Remove the next item */
-	check_xa_multi_store_adv_delete(test, next_index, 0);
+	check_xa_multi_store_adv_delete(xa, next_index, 0);
 
	/* Now use order for a new pointer */
-	check_xa_multi_store_adv_add(test, next_index, order, &some_val_2);
+	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
 
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
 
-	check_xa_multi_store_adv_delete(test, next_index, order);
-	check_xa_multi_store_adv_delete(test, base, order);
+	check_xa_multi_store_adv_delete(xa, next_index, order);
+	check_xa_multi_store_adv_delete(xa, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	/* starting fresh again */
@@ -897,7 +856,7 @@
	/* let's test some holes now */
 
	/* hole at base and next_next */
-	check_xa_multi_store_adv_add(test, next_index, order, &some_val_2);
+	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
 
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
@@ -908,12 +867,12 @@
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
 
-	check_xa_multi_store_adv_delete(test, next_index, order);
+	check_xa_multi_store_adv_delete(xa, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	/* hole at base and next */
 
-	check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2);
+	check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
 
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
@@ -924,12 +883,12 @@
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);
 
-	check_xa_multi_store_adv_delete(test, next_next_index, order);
+	check_xa_multi_store_adv_delete(xa, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 #endif
 
-static noinline void check_multi_store_advanced(struct kunit *test)
+static noinline void check_multi_store_advanced(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
@@ -941,59 +900,59 @@ static noinline void check_multi_store_advanced(struct kunit *test)
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
-			check_xa_multi_store_adv(test, pos, i);
-			check_xa_multi_store_adv(test, pos + 157, i);
+			check_xa_multi_store_adv(xa, pos, i);
+			check_xa_multi_store_adv(xa, pos + 157, i);
		}
	}
 #endif
 }
 
-static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base)
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 {
	int i;
	u32 id;
 
	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
-	xa_alloc_index(test, xa, base, GFP_KERNEL);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
	/* Erasing it should make the array empty again */
-	xa_erase_index(test, xa, base);
+	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	/* And it should assign %base again */
-	xa_alloc_index(test, xa, base, GFP_KERNEL);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
-		xa_alloc_index(test, xa, i, GFP_KERNEL);
+		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
-		xa_erase_index(test, xa, i);
-	xa_alloc_index(test, xa, base, GFP_KERNEL);
+		xa_erase_index(xa, i);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);
 
	/* And it should assign %base again */
-	xa_alloc_index(test, xa, base, GFP_KERNEL);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
	/* The next assigned ID should be base+1 */
-	xa_alloc_index(test, xa, base + 1, GFP_KERNEL);
-	xa_erase_index(test, xa, base + 1);
+	xa_alloc_index(xa, base + 1, GFP_KERNEL);
+	xa_erase_index(xa, base + 1);
 
	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
-	xa_alloc_index(test, xa, base + 2, GFP_KERNEL);
+	xa_alloc_index(xa, base + 2, GFP_KERNEL);
 
	/* If we then erase base, it should be free */
-	xa_erase_index(test, xa, base);
-	xa_alloc_index(test, xa, base, GFP_KERNEL);
+	xa_erase_index(xa, base);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
-	xa_erase_index(test, xa, base + 1);
-	xa_erase_index(test, xa, base + 2);
+	xa_erase_index(xa, base + 1);
+	xa_erase_index(xa, base + 2);
 
	for (i = 1; i < 5000; i++) {
-		xa_alloc_index(test, xa, base + i, GFP_KERNEL);
+		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}
 
	xa_destroy(xa);
@@ -1016,14 +975,14 @@ static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, uns
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
-	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
-	xa_erase_index(test, xa, 3);
+	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base)
+static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
 {
	unsigned int i, id;
	unsigned long index;
@@ -1059,7 +1018,7 @@ static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, uns
	XA_BUG_ON(xa, id != 5);
 
	xa_for_each(xa, index, entry) {
-		xa_erase_index(test, xa, index);
+		xa_erase_index(xa, index);
	}
 
	for (i = base; i < base + 9; i++) {
@@ -1074,7 +1033,7 @@
	xa_destroy(xa);
 }
 
-static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base)
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
 {
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
@@ -1090,8 +1049,8 @@
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
-	xa_erase_index(test, xa, 0x3ffd);
-	xa_erase_index(test, xa, 1);
+	xa_erase_index(xa, 0x3ffd);
+	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));
 
	for (i = 0x3ffe; i < 0x4003; i++) {
@@ -1106,8 +1065,8 @@
	/* Check wrap-around is handled correctly */
	if (base != 0)
-		xa_erase_index(test, xa, base);
-	xa_erase_index(test, xa, base + 1);
+		xa_erase_index(xa, base);
+	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
@@ -1120,7 +1079,7 @@
	XA_BUG_ON(xa, id != base + 1);
 
	xa_for_each(xa, index, entry)
-		xa_erase_index(test, xa, index);
+		xa_erase_index(xa, index);
 
	XA_BUG_ON(xa, !xa_empty(xa));
 }
@@ -1128,21 +1087,19 @@
 static DEFINE_XARRAY_ALLOC(xa0);
 static DEFINE_XARRAY_ALLOC1(xa1);
 
-static noinline void check_xa_alloc(struct kunit *test)
+static noinline void check_xa_alloc(void)
 {
-	check_xa_alloc_1(test, &xa0, 0);
-	check_xa_alloc_1(test, &xa1, 1);
-	check_xa_alloc_2(test, &xa0, 0);
-	check_xa_alloc_2(test, &xa1, 1);
-	check_xa_alloc_3(test, &xa0, 0);
-	check_xa_alloc_3(test, &xa1, 1);
+	check_xa_alloc_1(&xa0, 0);
+	check_xa_alloc_1(&xa1, 1);
+	check_xa_alloc_2(&xa0, 0);
+	check_xa_alloc_2(&xa1, 1);
+	check_xa_alloc_3(&xa0, 0);
+	check_xa_alloc_3(&xa1, 1);
 }
 
-static noinline void __check_store_iter(struct kunit *test, unsigned long start,
+static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;
@@ -1166,54 +1123,50 @@ retry:
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
-	xa_erase_index(test, xa, start);
+	xa_erase_index(xa, start);
 }
 
-static noinline void check_store_iter(struct kunit *test)
+static noinline void check_store_iter(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
 
	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
-		__check_store_iter(test, 0, i, 0);
+		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
-		__check_store_iter(test, min, i, 0);
+		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
 
		xa_store_index(xa, min, GFP_KERNEL);
-		__check_store_iter(test, min, i, 1);
+		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
-		__check_store_iter(test, min, i, 1);
+		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
 
		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
-		__check_store_iter(test, 0, i, min);
+		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
-		__check_store_iter(test, min, i, min);
+		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
 #ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
-	__check_store_iter(test, 64, 2, 1);
-	xa_erase_index(test, xa, 63);
+	__check_store_iter(xa, 64, 2, 1);
+	xa_erase_index(xa, 63);
 #endif
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_multi_find_1(struct kunit *test, unsigned int order)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	struct xarray *xa = xa_param(test);
-
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;
@@ -1236,17 +1189,15 @@
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);
 
-	xa_erase_index(test, xa, multi);
-	xa_erase_index(test, xa, next);
-	xa_erase_index(test, xa, next + 1);
+	xa_erase_index(xa, multi);
+	xa_erase_index(xa, next);
+	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
 #endif
 }
 
-static noinline void check_multi_find_2(struct kunit *test)
+static noinline void check_multi_find_2(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;
@@ -1260,19 +1211,17 @@
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
-				xa_erase_index(test, xa, index);
+				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
-			xa_erase_index(test, xa, index - 1);
+			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
 }
 
-static noinline void check_multi_find_3(struct kunit *test)
+static noinline void check_multi_find_3(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int order;
@@ -1281,14 +1230,12 @@
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
-		xa_erase_index(test, xa, 0);
+		xa_erase_index(xa, 0);
	}
 }
 
-static noinline void check_find_1(struct kunit *test)
+static noinline void check_find_1(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long i, j, k;
 
	XA_BUG_ON(xa, !xa_empty(xa));
@@ -1325,20 +1272,18 @@
			else
				XA_BUG_ON(xa, entry != NULL);
		}
-		xa_erase_index(test, xa, j);
+		xa_erase_index(xa, j);
		XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
		XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
	}
-	xa_erase_index(test, xa, i);
+	xa_erase_index(xa, i);
	XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_find_2(struct kunit *test)
+static noinline void check_find_2(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	void *entry;
	unsigned long i, j, index;
@@ -1358,10 +1303,8 @@
	xa_destroy(xa);
 }
 
-static noinline void check_find_3(struct kunit *test)
+static noinline void check_find_3(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;
@@ -1385,10 +1328,8 @@
	xa_destroy(xa);
 }
 
-static noinline void check_find_4(struct kunit *test)
+static noinline void check_find_4(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long index = 0;
	void *entry;
@@ -1400,22 +1341,22 @@
	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);
 
-	xa_erase_index(test, xa, ULONG_MAX);
+	xa_erase_index(xa, ULONG_MAX);
 }
 
-static noinline void check_find(struct kunit *test)
+static noinline void check_find(struct xarray *xa)
 {
	unsigned i;
 
-	check_find_1(test);
-	check_find_2(test);
-	check_find_3(test);
-	check_find_4(test);
+	check_find_1(xa);
+	check_find_2(xa);
+	check_find_3(xa);
+	check_find_4(xa);
 
	for (i = 2; i < 10; i++)
-		check_multi_find_1(test, i);
-	check_multi_find_2(test);
-	check_multi_find_3(test);
+		check_multi_find_1(xa, i);
+	check_multi_find_2(xa);
+	check_multi_find_3(xa);
 }
 
 /* See find_swap_entry() in mm/shmem.c */
@@ -1441,10 +1382,8 @@ static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
	return entry ? xas.xa_index : -1;
 }
-static noinline void check_find_entry(struct kunit *test)
+static noinline void check_find_entry(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
 #ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;
@@ -1471,14 +1410,12 @@
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
-	xa_erase_index(test, xa, ULONG_MAX);
+	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_pause(struct kunit *test)
+static noinline void check_pause(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
@@ -1548,10 +1485,8 @@
 }
 
-static noinline void check_move_tiny(struct kunit *test)
+static noinline void check_move_tiny(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
 
	XA_BUG_ON(xa, !xa_empty(xa));
@@ -1568,14 +1503,12 @@
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
-	xa_erase_index(test, xa, 0);
+	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_move_max(struct kunit *test)
+static noinline void check_move_max(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
 
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
@@ -1591,14 +1524,12 @@
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();
 
-	xa_erase_index(test, xa, ULONG_MAX);
+	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_move_small(struct kunit *test, unsigned long idx)
+static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	unsigned long i;
@@ -1640,15 +1571,13 @@
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();
 
-	xa_erase_index(test, xa, 0);
-	xa_erase_index(test, xa, idx);
+	xa_erase_index(xa, 0);
+	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_move(struct kunit *test)
+static noinline void check_move(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;
@@ -1675,7 +1604,7 @@
	rcu_read_unlock();
 
	for (i = (1 << 8); i < (1 << 15); i++)
-		xa_erase_index(test, xa, i);
+		xa_erase_index(xa, i);
 
	i = xas.xa_index;
@@ -1706,17 +1635,17 @@
	xa_destroy(xa);
 
-	check_move_tiny(test);
-	check_move_max(test);
+	check_move_tiny(xa);
+	check_move_max(xa);
 
	for (i = 0; i < 16; i++)
-		check_move_small(test, 1UL << i);
+		check_move_small(xa, 1UL << i);
 
	for (i = 2; i < 16; i++)
-		check_move_small(test, (1UL << i) - 1);
+		check_move_small(xa, (1UL << i) - 1);
 }
 
-static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa,
+static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
 {
	XA_STATE_ORDER(xas, xa, index, order);
@@ -1739,34 +1668,30 @@ unlock:
	XA_BUG_ON(xa, xas_error(&xas));
 }
 
-static noinline void check_create_range_1(struct kunit *test,
+static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long i;
 
-	xa_store_many_order(test, xa, index, order);
+	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
-		xa_erase_index(test, xa, i);
+		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_create_range_2(struct kunit *test, unsigned int order)
+static noinline void check_create_range_2(struct xarray *xa, unsigned order)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long i;
	unsigned long nr = 1UL << order;
 
	for (i = 0; i < nr * nr; i += nr)
-		xa_store_many_order(test, xa, i, order);
+		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
-		xa_erase_index(test, xa, i);
+		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_create_range_3(struct kunit *test)
+static noinline void check_create_range_3(void)
 {
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
@@ -1774,11 +1699,9 @@
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
 }
 
-static noinline void check_create_range_4(struct kunit *test,
+static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;
@@ -1804,15 +1727,13 @@ unlock:
	XA_BUG_ON(xa, xas_error(&xas));
 
	for (i = base; i < base + (1UL << order); i++)
-		xa_erase_index(test, xa, i);
+		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_create_range_5(struct kunit *test,
+static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;
@@ -1829,46 +1750,44 @@
	xa_destroy(xa);
 }
 
-static noinline void check_create_range(struct kunit *test)
+static noinline void check_create_range(struct xarray *xa)
 {
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
 
	for (order = 0; order < max_order; order++) {
-		check_create_range_1(test, 0, order);
-		check_create_range_1(test, 1U << order, order);
-		check_create_range_1(test, 2U << order, order);
-		check_create_range_1(test, 3U << order, order);
-		check_create_range_1(test, 1U << 24, order);
+		check_create_range_1(xa, 0, order);
+		check_create_range_1(xa, 1U << order, order);
+		check_create_range_1(xa, 2U << order, order);
+		check_create_range_1(xa, 3U << order, order);
+		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
-			check_create_range_2(test, order);
+			check_create_range_2(xa, order);
 
-		check_create_range_4(test, 0, order);
-		check_create_range_4(test, 1U << order, order);
-		check_create_range_4(test, 2U << order, order);
-		check_create_range_4(test, 3U << order, order);
-		check_create_range_4(test, 1U << 24, order);
+		check_create_range_4(xa, 0, order);
+		check_create_range_4(xa, 1U << order, order);
+		check_create_range_4(xa, 2U << order, order);
+		check_create_range_4(xa, 3U << order, order);
+		check_create_range_4(xa, 1U << 24, order);
 
-		check_create_range_4(test, 1, order);
-		check_create_range_4(test, (1U << order) + 1, order);
-		check_create_range_4(test, (2U << order) + 1, order);
-		check_create_range_4(test, (2U << order) - 1, order);
-		check_create_range_4(test, (3U << order) + 1, order);
-		check_create_range_4(test, (3U << order) - 1, order);
-		check_create_range_4(test, (1U << 24) + 1, order);
+		check_create_range_4(xa, 1, order);
+		check_create_range_4(xa, (1U << order) + 1, order);
+		check_create_range_4(xa, (2U << order) + 1, order);
+		check_create_range_4(xa, (2U << order) - 1, order);
+		check_create_range_4(xa, (3U << order) + 1, order);
+		check_create_range_4(xa, (3U << order) - 1, order);
+		check_create_range_4(xa, (1U << 24) + 1, order);
 
-		check_create_range_5(test, 0, order);
-		check_create_range_5(test, (1U << order), order);
+		check_create_range_5(xa, 0, order);
+		check_create_range_5(xa, (1U << order), order);
	}
 
-	check_create_range_3(test);
+	check_create_range_3();
 }
 
-static noinline void __check_store_range(struct kunit *test, unsigned long first,
+static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
 {
-	struct xarray *xa = xa_param(test);
-
 #ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
@@ -1883,28 +1802,26 @@
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_store_range(struct kunit *test)
+static noinline void check_store_range(struct xarray *xa)
 {
	unsigned long i, j;
 
	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
-			__check_store_range(test, i, j);
-			__check_store_range(test, 128 + i, 128 + j);
-			__check_store_range(test, 4095 + i, 4095 + j);
-			__check_store_range(test, 4096 + i, 4096 + j);
-			__check_store_range(test, 123456 + i, 123456 + j);
-			__check_store_range(test, (1 << 24) + i, (1 << 24) + j);
+			__check_store_range(xa, i, j);
+			__check_store_range(xa, 128 + i, 128 + j);
+			__check_store_range(xa, 4095 + i, 4095 + j);
+			__check_store_range(xa, 4096 + i, 4096 + j);
+			__check_store_range(xa, 123456 + i, 123456 + j);
+			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
 }
 
 #ifdef CONFIG_XARRAY_MULTI
-static void check_split_1(struct kunit *test, unsigned long index,
+static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;
@@ -1940,30 +1857,26 @@ static void check_split_1(struct kunit *test, unsigned long index,
	xa_destroy(xa);
 }
 
-static noinline void check_split(struct kunit *test)
+static noinline void check_split(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int order, new_order;
 
	XA_BUG_ON(xa, !xa_empty(xa));
 
	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
-			check_split_1(test, 0, order, new_order);
-			check_split_1(test, 1UL << order, order, new_order);
-			check_split_1(test, 3UL << order, order, new_order);
+			check_split_1(xa, 0, order, new_order);
+			check_split_1(xa, 1UL << order, order, new_order);
+			check_split_1(xa, 3UL << order, order, new_order);
		}
	}
 }
 #else
-static void check_split(struct kunit *test) { }
+static void check_split(struct xarray *xa) { }
 #endif
 
-static void check_align_1(struct kunit *test, char *name)
+static void check_align_1(struct xarray *xa, char *name)
 {
-	struct xarray *xa = xa_param(test);
-
	int i;
	unsigned int id;
	unsigned long index;
@@ -1983,10 +1896,8 @@
  * We should always be able to store without allocating memory after
  * reserving a slot.
  */
-static void check_align_2(struct kunit *test, char *name)
+static void check_align_2(struct xarray *xa, char *name)
 {
-	struct xarray *xa = xa_param(test);
-
	int i;
 
	XA_BUG_ON(xa, !xa_empty(xa));
@@ -2005,15 +1916,15 @@
	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_align(struct kunit *test)
+static noinline void check_align(struct xarray *xa)
 {
	char name[] = "Motorola 68000";
 
-	check_align_1(test, name);
-	check_align_1(test, name + 1);
-	check_align_1(test, name + 2);
-	check_align_1(test, name + 3);
-	check_align_2(test, name);
+	check_align_1(xa, name);
+	check_align_1(xa, name + 1);
+	check_align_1(xa, name + 2);
+	check_align_1(xa, name + 3);
+	check_align_2(xa, name);
 }
 
 static LIST_HEAD(shadow_nodes);
@@ -2029,7 +1940,7 @@ static void test_update_node(struct xa_node *node)
	}
 }
 
-static noinline void shadow_remove(struct kunit *test, struct xarray *xa)
+static noinline void shadow_remove(struct xarray *xa)
 {
	struct xa_node *node;
@@ -2043,17 +1954,8 @@
	xa_unlock(xa);
 }
 
-struct workingset_testcase {
-	struct xarray *xa;
-	unsigned long index;
-};
-
-static noinline void check_workingset(struct kunit *test)
+static noinline void check_workingset(struct xarray *xa, unsigned long index)
 {
-	struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value;
-	struct xarray *xa = tc.xa;
-	unsigned long index = tc.index;
-
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);
@@ -2076,7 +1978,7 @@
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));
 
-	shadow_remove(test, xa);
+	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
 }
@@ -2085,11 +1987,9 @@
  * Check that the pointer / value / sibling entries are accounted the
  * way we expect them to be.
  */
-static noinline void check_account(struct kunit *test)
+static noinline void check_account(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	struct xarray *xa = xa_param(test);
-
	unsigned int order;
 
	for (order = 1; order < 12; order++) {
@@ -2116,10 +2016,8 @@
 #endif
 }
 
-static noinline void check_get_order(struct kunit *test)
+static noinline void check_get_order(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;
@@ -2138,10 +2036,8 @@
	}
 }
 
-static noinline void check_xas_get_order(struct kunit *test)
+static noinline void check_xas_get_order(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
 
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
@@ -2173,10 +2069,8 @@
	}
 }
 
-static noinline void check_xas_conflict_get_order(struct kunit *test)
+static noinline void check_xas_conflict_get_order(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	XA_STATE(xas, xa, 0);
	void *entry;
@@ -2233,10 +2127,8 @@
 }
 
-static noinline void check_destroy(struct kunit *test)
+static noinline void check_destroy(struct xarray *xa)
 {
-	struct xarray *xa = xa_param(test);
-
	unsigned long index;
 
	XA_BUG_ON(xa, !xa_empty(xa));
@@ -2269,59 +2161,52 @@
 }
 
 static DEFINE_XARRAY(array);
-static struct xarray *arrays[] = { &array };
-KUNIT_ARRAY_PARAM(array, arrays, NULL);
-static struct xarray *xa0s[] = { &xa0 };
-KUNIT_ARRAY_PARAM(xa0, xa0s, NULL);
+static int xarray_checks(void)
+{
+	check_xa_err(&array);
+	check_xas_retry(&array);
+	check_xa_load(&array);
+	check_xa_mark(&array);
+	check_xa_shrink(&array);
+	check_xas_erase(&array);
+	check_insert(&array);
+	check_cmpxchg(&array);
+	check_cmpxchg_order(&array);
+	check_reserve(&array);
+	check_reserve(&xa0);
+	check_multi_store(&array);
+	check_multi_store_advanced(&array);
+	check_get_order(&array);
+	check_xas_get_order(&array);
+	check_xas_conflict_get_order(&array);
+	check_xa_alloc();
+	check_find(&array);
+	check_find_entry(&array);
+	check_pause(&array);
+	check_account(&array);
+	check_destroy(&array);
+	check_move(&array);
+	check_create_range(&array);
+	check_store_range(&array);
+	check_store_iter(&array);
+	check_align(&xa0);
+	check_split(&array);
-static struct workingset_testcase workingset_testcases[] = {
-	{ &array, 0 },
-	{ &array, 64 },
-	{ &array, 4096 },
-};
-KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL);
+	check_workingset(&array, 0);
+	check_workingset(&array, 64);
+	check_workingset(&array, 4096);
-static struct kunit_case xarray_cases[] = {
-	KUNIT_CASE_PARAM(check_xa_err, array_gen_params),
-	KUNIT_CASE_PARAM(check_xas_retry, array_gen_params),
-	KUNIT_CASE_PARAM(check_xa_load, array_gen_params),
-	KUNIT_CASE_PARAM(check_xa_mark, array_gen_params),
-	KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params),
-	KUNIT_CASE_PARAM(check_xas_erase, array_gen_params),
-	KUNIT_CASE_PARAM(check_insert, array_gen_params),
-	KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params),
-	KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params),
-	KUNIT_CASE_PARAM(check_reserve, array_gen_params),
-	KUNIT_CASE_PARAM(check_reserve, xa0_gen_params),
-	KUNIT_CASE_PARAM(check_multi_store, array_gen_params),
@@ -2269,59 +2161,52 @@
 }
 
 static DEFINE_XARRAY(array);
-static struct xarray *arrays[] = { &array };
-KUNIT_ARRAY_PARAM(array, arrays, NULL);
 
-static struct xarray *xa0s[] = { &xa0 };
-KUNIT_ARRAY_PARAM(xa0, xa0s, NULL);
+static int xarray_checks(void)
+{
+	check_xa_err(&array);
+	check_xas_retry(&array);
+	check_xa_load(&array);
+	check_xa_mark(&array);
+	check_xa_shrink(&array);
+	check_xas_erase(&array);
+	check_insert(&array);
+	check_cmpxchg(&array);
+	check_cmpxchg_order(&array);
+	check_reserve(&array);
+	check_reserve(&xa0);
+	check_multi_store(&array);
+	check_multi_store_advanced(&array);
+	check_get_order(&array);
+	check_xas_get_order(&array);
+	check_xas_conflict_get_order(&array);
+	check_xa_alloc();
+	check_find(&array);
+	check_find_entry(&array);
+	check_pause(&array);
+	check_account(&array);
+	check_destroy(&array);
+	check_move(&array);
+	check_create_range(&array);
+	check_store_range(&array);
+	check_store_iter(&array);
+	check_align(&xa0);
+	check_split(&array);
 
-static struct workingset_testcase workingset_testcases[] = {
-	{ &array, 0 },
-	{ &array, 64 },
-	{ &array, 4096 },
-};
-KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL);
+	check_workingset(&array, 0);
+	check_workingset(&array, 64);
+	check_workingset(&array, 4096);
 
-static struct kunit_case xarray_cases[] = {
-	KUNIT_CASE_PARAM(check_xa_err, array_gen_params),
-	KUNIT_CASE_PARAM(check_xas_retry, array_gen_params),
-	KUNIT_CASE_PARAM(check_xa_load, array_gen_params),
-	KUNIT_CASE_PARAM(check_xa_mark, array_gen_params),
-	KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params),
-	KUNIT_CASE_PARAM(check_xas_erase, array_gen_params),
-	KUNIT_CASE_PARAM(check_insert, array_gen_params),
-	KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params),
-	KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params),
-	KUNIT_CASE_PARAM(check_reserve, array_gen_params),
-	KUNIT_CASE_PARAM(check_reserve, xa0_gen_params),
-	KUNIT_CASE_PARAM(check_multi_store, array_gen_params),
-	KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params),
-	KUNIT_CASE_PARAM(check_get_order, array_gen_params),
-	KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params),
-	KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params),
-	KUNIT_CASE(check_xa_alloc),
-	KUNIT_CASE_PARAM(check_find, array_gen_params),
-	KUNIT_CASE_PARAM(check_find_entry, array_gen_params),
-	KUNIT_CASE_PARAM(check_pause, array_gen_params),
-	KUNIT_CASE_PARAM(check_account, array_gen_params),
-	KUNIT_CASE_PARAM(check_destroy, array_gen_params),
-	KUNIT_CASE_PARAM(check_move, array_gen_params),
-	KUNIT_CASE_PARAM(check_create_range, array_gen_params),
-	KUNIT_CASE_PARAM(check_store_range, array_gen_params),
-	KUNIT_CASE_PARAM(check_store_iter, array_gen_params),
-	KUNIT_CASE_PARAM(check_align, xa0_gen_params),
-	KUNIT_CASE_PARAM(check_split, array_gen_params),
-	KUNIT_CASE_PARAM(check_workingset, workingset_gen_params),
-	{},
-};
+	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
+	return (tests_run == tests_passed) ? 0 : -EINVAL;
+}
 
-static struct kunit_suite xarray_suite = {
-	.name = "xarray",
-	.test_cases = xarray_cases,
-};
-
-kunit_test_suite(xarray_suite);
+static void xarray_exit(void)
+{
+}
+
+module_init(xarray_checks);
+module_exit(xarray_exit);
 
 MODULE_AUTHOR("Matthew Wilcox ");
 MODULE_DESCRIPTION("XArray API test module");
 MODULE_LICENSE("GPL");
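Note: xarray_checks() reports through the file's own pass/fail counters rather than KUnit assertions. Reconstructed from the printk above, the accounting macro looks roughly like this (a sketch only; the real XA_BUG_ON sits near the top of lib/test_xarray.c and may differ in detail):

	static unsigned int tests_run, tests_passed;

	#define XA_BUG_ON(xa, x) do {					\
		tests_run++;						\
		if (x) {						\
			printk("BUG at %s:%d\n", __FILE__, __LINE__);	\
			dump_stack();					\
		} else {						\
			tests_passed++;					\
		}							\
	} while (0)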
diff --git a/mm/compaction.c b/mm/compaction.c
index bcc0df0066dc..12ed8425fa17 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2491,7 +2491,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
  */
 static enum compact_result
 compaction_suit_allocation_order(struct zone *zone, unsigned int order,
-				 int highest_zoneidx, unsigned int alloc_flags)
+				 int highest_zoneidx, unsigned int alloc_flags,
+				 bool async)
 {
 	unsigned long watermark;
 
@@ -2500,6 +2501,23 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order,
 				   alloc_flags))
 		return COMPACT_SUCCESS;
 
+	/*
+	 * For unmovable allocations (without ALLOC_CMA), check if there is enough
+	 * free memory in the non-CMA pageblocks. Otherwise compaction could form
+	 * the high-order page in CMA pageblocks, which would not help the
+	 * allocation to succeed. However, limit the check to costly order async
+	 * compaction (such as opportunistic THP attempts) because there is the
+	 * possibility that compaction would migrate pages from non-CMA to CMA
+	 * pageblock.
+	 */
+	if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
+	    !(alloc_flags & ALLOC_CMA)) {
+		watermark = low_wmark_pages(zone) + compact_gap(order);
+		if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
+					 0, zone_page_state(zone, NR_FREE_PAGES)))
+			return COMPACT_SKIPPED;
+	}
+
 	if (!compaction_suitable(zone, order, highest_zoneidx))
 		return COMPACT_SKIPPED;
 
@@ -2535,7 +2553,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 	if (!is_via_compact_memory(cc->order)) {
 		ret = compaction_suit_allocation_order(cc->zone, cc->order,
 						       cc->highest_zoneidx,
-						       cc->alloc_flags);
+						       cc->alloc_flags,
+						       cc->mode == MIGRATE_ASYNC);
 		if (ret != COMPACT_CONTINUE)
 			return ret;
 	}
@@ -3038,7 +3057,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
 
 		ret = compaction_suit_allocation_order(zone,
 				pgdat->kcompactd_max_order,
-				highest_zoneidx, ALLOC_WMARK_MIN);
+				highest_zoneidx, ALLOC_WMARK_MIN,
+				false);
 		if (ret == COMPACT_CONTINUE)
 			return true;
 	}
@@ -3079,7 +3099,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 			continue;
 
 		ret = compaction_suit_allocation_order(zone,
-				cc.order, zoneid, ALLOC_WMARK_MIN);
+				cc.order, zoneid, ALLOC_WMARK_MIN,
+				false);
 		if (ret != COMPACT_CONTINUE)
 			continue;
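Aside: the new compaction check boils down to one question: can this order be assembled from non-CMA memory alone? Passing an alloc_flags of 0 to __zone_watermark_ok() keeps free CMA pages out of the calculation. Restated as a standalone helper (sketch only; the helper name is invented and the body mirrors the hunk above):

	static bool non_cma_free_enough(struct zone *zone, unsigned int order,
					int highest_zoneidx)
	{
		unsigned long watermark = low_wmark_pages(zone) + compact_gap(order);

		/* alloc_flags == 0, so free CMA pages are not counted. */
		return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
					   0, zone_page_state(zone, NR_FREE_PAGES));
	}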
diff --git a/mm/gup.c b/mm/gup.c
index 9aaf338cc1f4..3883b307780e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2320,13 +2320,13 @@ static void pofs_unpin(struct pages_or_folios *pofs)
 /*
  * Returns the number of collected folios. Return value is always >= 0.
  */
-static unsigned long collect_longterm_unpinnable_folios(
+static void collect_longterm_unpinnable_folios(
 		struct list_head *movable_folio_list,
 		struct pages_or_folios *pofs)
 {
-	unsigned long i, collected = 0;
 	struct folio *prev_folio = NULL;
 	bool drain_allow = true;
+	unsigned long i;
 
 	for (i = 0; i < pofs->nr_entries; i++) {
 		struct folio *folio = pofs_get_folio(pofs, i);
@@ -2338,8 +2338,6 @@ static unsigned long collect_longterm_unpinnable_folios(
 		if (folio_is_longterm_pinnable(folio))
 			continue;
 
-		collected++;
-
 		if (folio_is_device_coherent(folio))
 			continue;
 
@@ -2361,8 +2359,6 @@ static unsigned long collect_longterm_unpinnable_folios(
 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
 				    folio_nr_pages(folio));
 	}
-
-	return collected;
 }
 
 /*
@@ -2439,11 +2435,9 @@ static long
 check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
 {
 	LIST_HEAD(movable_folio_list);
-	unsigned long collected;
 
-	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
-						       pofs);
-	if (!collected)
+	collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
+	if (list_empty(&movable_folio_list))
 		return 0;
 
 	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3b25b69aa94f..65068671e460 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3309,7 +3309,7 @@ static void __init gather_bootmem_prealloc(void)
 		.thread_fn	= gather_bootmem_prealloc_parallel,
 		.fn_arg		= NULL,
 		.start		= 0,
-		.size		= num_node_state(N_MEMORY),
+		.size		= nr_node_ids,
 		.align		= 1,
 		.min_chunk	= 1,
 		.max_threads	= num_node_state(N_MEMORY),
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 67fc321db79b..102048821c22 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1084,6 +1085,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 	 * properties (e.g. reside in DMAable memory).
 	 */
 	if ((flags & GFP_ZONEMASK) ||
+	    ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
 	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
 		return NULL;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 982bb5ef3233..c6ed68604136 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1689,7 +1689,7 @@ static void kmemleak_scan(void)
 			unsigned long phys = object->pointer;
 
 			if (PHYS_PFN(phys) < min_low_pfn ||
-			    PHYS_PFN(phys + object->size) >= max_low_pfn)
+			    PHYS_PFN(phys + object->size) > max_low_pfn)
 				__paint_it(object, KMEMLEAK_BLACK);
 		}
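For context: the kmemleak change is a one-character half-open-interval fix. phys + object->size is an exclusive end, so an object that ends exactly at the lowmem boundary maps to a PFN equal to max_low_pfn and is still fully scannable; only a strictly greater PFN lies outside. Illustrated as a userspace-style predicate (PAGE_SHIFT value assumed for the example):

	#include <stdbool.h>

	#define PAGE_SHIFT	12	/* illustrative */
	#define PHYS_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

	/* True when [phys, phys + size) does not fit inside lowmem. */
	static bool outside_lowmem(unsigned long phys, unsigned long size,
				   unsigned long min_pfn, unsigned long max_pfn)
	{
		return PHYS_PFN(phys) < min_pfn ||
		       PHYS_PFN(phys + size) > max_pfn;	/* '>' not '>=' */
	}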
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6e867c16ea93..ba19430dd4ea 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -794,7 +794,7 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
 		if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
 			continue;
 		if (need_reclaim) {
-			ret = cluster_reclaim_range(si, ci, start, end);
+			ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
 			/*
 			 * Reclaim drops ci->lock and cluster could be used
 			 * by another order. Not checking flag as off-list
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 683ec56d4f60..c767d71c43d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1086,7 +1086,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 	struct folio_batch free_folios;
 	LIST_HEAD(ret_folios);
 	LIST_HEAD(demote_folios);
-	unsigned int nr_reclaimed = 0;
+	unsigned int nr_reclaimed = 0, nr_demoted = 0;
 	unsigned int pgactivate = 0;
 	bool do_demote_pass;
 	struct swap_iocb *plug = NULL;
@@ -1550,8 +1550,9 @@ keep:
 	/* 'folio_list' is always empty here */
 
 	/* Migrate folios selected for demotion */
-	stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
-	nr_reclaimed += stat->nr_demoted;
+	nr_demoted = demote_folio_list(&demote_folios, pgdat);
+	nr_reclaimed += nr_demoted;
+	stat->nr_demoted += nr_demoted;
 	/* Folios that could not be demoted are still in @demote_folios */
 	if (!list_empty(&demote_folios)) {
 		/* Folios which weren't demoted go back on @folio_list */
@@ -1692,6 +1693,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
+	unsigned long max_nr_skipped = 0;
 	LIST_HEAD(folios_skipped);
 
 	total_scan = 0;
@@ -1706,9 +1708,12 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx) {
+		/* Using max_nr_skipped to prevent hard LOCKUP */
+		if (max_nr_skipped < SWAP_CLUSTER_MAX_SKIPPED &&
+		    (folio_zonenum(folio) > sc->reclaim_idx)) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
+			max_nr_skipped++;
 			goto move;
 		}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 817626a351f8..6d0e47f7ae33 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -452,7 +452,7 @@ static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
 	.lock	= INIT_LOCAL_LOCK(lock),
 };
 
-static inline bool is_first_zpdesc(struct zpdesc *zpdesc)
+static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc)
 {
 	return PagePrivate(zpdesc_page(zpdesc));
 }
diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
index 2f11c4f9c345..13eb8b3901b8 100644
--- a/scripts/gdb/linux/cpus.py
+++ b/scripts/gdb/linux/cpus.py
@@ -167,7 +167,7 @@ def get_current_task(cpu):
         var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
         return per_cpu(var_ptr, cpu).dereference()
     elif utils.is_target_arch("aarch64"):
-        current_task_addr = gdb.parse_and_eval("$SP_EL0")
+        current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
         if (current_task_addr >> 63) != 0:
             current_task = current_task_addr.cast(task_ptr_type)
             return current_task.dereference()