Merge tag 'mm-hotfixes-stable-2025-02-19-17-49' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "18 hotfixes. 5 are cc:stable and the remainder address post-6.13
  issues or aren't considered necessary for -stable kernels. 10 are
  for MM and 8 are for non-MM. All are singletons, please see the
  changelogs for details"

* tag 'mm-hotfixes-stable-2025-02-19-17-49' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  test_xarray: fix failure in check_pause when CONFIG_XARRAY_MULTI is not defined
  kasan: don't call find_vm_area() in a PREEMPT_RT kernel
  MAINTAINERS: update Nick's contact info
  selftests/mm: fix check for running THP tests
  mm: hugetlb: avoid fallback for specific node allocation of 1G pages
  memcg: avoid dead loop when setting memory.max
  mailmap: update Nick's entry
  mm: pgtable: fix incorrect reclaim of non-empty PTE pages
  taskstats: modify taskstats version
  getdelays: fix error format characters
  mm/migrate_device: don't add folio to be freed to LRU in migrate_device_finalize()
  tools/mm: fix build warnings with musl-libc
  mailmap: add entry for Feng Tang
  .mailmap: add entries for Jeff Johnson
  mm,madvise,hugetlb: check for 0-length range after end address adjustment
  mm/zswap: fix inconsistency when zswap_store_page() fails
  lib/iov_iter: fix import_iovec_ubuf iovec management
  procfs: fix a locking bug in a vmcore_add_device_dump() error path
commit 87a132e739

18 changed files with 120 additions and 66 deletions
.mailmap (4 changed lines)

@@ -226,6 +226,7 @@ Fangrui Song <i@maskray.me> <maskray@google.com>
 Felipe W Damasio <felipewd@terra.com.br>
 Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
+Feng Tang <feng.79.tang@gmail.com> <feng.tang@intel.com>
 Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
 Filipe Lautert <filipe@icewall.org>
 Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
@@ -317,6 +318,8 @@ Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
+Jeff Johnson <jeff.johnson@oss.qualcomm.com> <jjohnson@codeaurora.org>
+Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
 Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
@@ -531,6 +534,7 @@ Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
 Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
 Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
 Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
+Nick Desaulniers <nick.desaulniers+lkml@gmail.com> <ndesaulniers@google.com>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
 Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
Documentation/process/embargoed-hardware-issues.rst

@@ -308,7 +308,7 @@ an involved disclosed party. The current ambassadors list:
   Google        Kees Cook <keescook@chromium.org>

-  LLVM          Nick Desaulniers <ndesaulniers@google.com>
+  LLVM          Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
   ============= ========================================================

 If you want your organization to be added to the ambassadors list, please
Documentation/translations/sp_SP/process/embargoed-hardware-issues.rst

@@ -287,7 +287,7 @@ revelada involucrada. La lista de embajadores actuales:
   Google        Kees Cook <keescook@chromium.org>

-  LLVM          Nick Desaulniers <ndesaulniers@google.com>
+  LLVM          Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
   ============= ========================================================

 Si quiere que su organización se añada a la lista de embajadores, por
MAINTAINERS

@@ -5655,7 +5655,7 @@ F:	.clang-format

 CLANG/LLVM BUILD SUPPORT
 M:	Nathan Chancellor <nathan@kernel.org>
-R:	Nick Desaulniers <ndesaulniers@google.com>
+R:	Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
 R:	Bill Wendling <morbo@google.com>
 R:	Justin Stitt <justinstitt@google.com>
 L:	llvm@lists.linux.dev
fs/proc/vmcore.c

@@ -1524,7 +1524,7 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
 		pr_warn_once("Unexpected adding of device dump\n");
 	if (vmcore_open) {
 		ret = -EBUSY;
-		goto out_err;
+		goto unlock;
 	}

 	list_add_tail(&dump->list, &vmcoredd_list);
@@ -1532,6 +1532,9 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
 	mutex_unlock(&vmcore_mutex);
 	return 0;

+unlock:
+	mutex_unlock(&vmcore_mutex);
+
 out_err:
 	vfree(buf);
 	vfree(dump);
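The bug was that the -EBUSY path jumped straight to out_err with vmcore_mutex still held. The fix uses a two-label unwind: "unlock" drops the lock and then falls through to the same cleanup that pre-lock failures use. A minimal userspace sketch of the pattern, with a pthread mutex and invented names standing in for the kernel code:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int list_busy;

static int add_dump(size_t size)
{
        void *buf = malloc(size);
        int ret = -1;

        if (!buf)
                goto out_err;            /* nothing locked yet */

        pthread_mutex_lock(&list_lock);
        if (list_busy)
                goto unlock;             /* lock held: release it first */

        /* publish buf on a list here; ownership moves to the list */
        pthread_mutex_unlock(&list_lock);
        return 0;

unlock:
        pthread_mutex_unlock(&list_lock);
out_err:
        free(buf);                       /* free(NULL) is a safe no-op */
        return ret;
}

int main(void)
{
        return add_dump(64) ? 1 : 0;
}

The ordering of the labels matters: every error taken after the lock is acquired passes through the unlock before reaching the shared resource cleanup.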
include/uapi/linux/taskstats.h

@@ -34,7 +34,7 @@
  */

-#define TASKSTATS_VERSION	14
+#define TASKSTATS_VERSION	15
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */

lib/iov_iter.c

@@ -1428,6 +1428,8 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
 	struct iovec *iov = *iovp;
 	ssize_t ret;

+	*iovp = NULL;
+
 	if (compat)
 		ret = copy_compat_iovec_from_user(iov, uvec, 1);
 	else
@@ -1438,7 +1440,6 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
 	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
 	if (unlikely(ret))
 		return ret;
-	*iovp = NULL;
 	return i->count;
 }
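The fix clears *iovp before the first early return rather than only on the success path, so a caller that frees *iovp in its error cleanup can never act on a stale pointer. A hedged userspace sketch of that out-parameter contract (parse_record() and its types are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct record { char data[64]; };

/* On success, *out points at a record the caller must free.
 * On failure, *out is guaranteed NULL, so the caller's cleanup
 * can unconditionally free(*out). */
static int parse_record(const char *src, struct record **out)
{
        struct record *r = malloc(sizeof(*r));

        *out = NULL;                    /* never leave a stale pointer */
        if (!r)
                return -1;
        if (strlen(src) >= sizeof(r->data)) {
                free(r);
                return -1;              /* *out is already NULL here */
        }
        strcpy(r->data, src);
        *out = r;
        return 0;
}

int main(void)
{
        struct record *rec = NULL;

        if (parse_record("hello", &rec) == 0)
                printf("%s\n", rec->data);
        free(rec);                      /* safe on both paths */
        return 0;
}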
lib/test_xarray.c

@@ -1418,7 +1418,7 @@ static noinline void check_pause(struct xarray *xa)
 {
 	XA_STATE(xas, xa, 0);
 	void *entry;
-	unsigned int order;
+	int order;
 	unsigned long index = 1;
 	unsigned int count = 0;

@@ -1450,7 +1450,7 @@ static noinline void check_pause(struct xarray *xa)
 	xa_destroy(xa);

 	index = 0;
-	for (order = XA_CHUNK_SHIFT; order > 0; order--) {
+	for (order = order_limit - 1; order >= 0; order--) {
 		XA_BUG_ON(xa, xa_store_order(xa, index, order,
 					xa_mk_index(index), GFP_KERNEL));
 		index += 1UL << order;
@@ -1462,24 +1462,25 @@ static noinline void check_pause(struct xarray *xa)
 	rcu_read_lock();
 	xas_for_each(&xas, entry, ULONG_MAX) {
 		XA_BUG_ON(xa, entry != xa_mk_index(index));
-		index += 1UL << (XA_CHUNK_SHIFT - count);
+		index += 1UL << (order_limit - count - 1);
 		count++;
 	}
 	rcu_read_unlock();
-	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+	XA_BUG_ON(xa, count != order_limit);

 	index = 0;
 	count = 0;
-	xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
+	/* test unaligned index */
+	xas_set(&xas, 1 % (1UL << (order_limit - 1)));
 	rcu_read_lock();
 	xas_for_each(&xas, entry, ULONG_MAX) {
 		XA_BUG_ON(xa, entry != xa_mk_index(index));
-		index += 1UL << (XA_CHUNK_SHIFT - count);
+		index += 1UL << (order_limit - count - 1);
 		count++;
 		xas_pause(&xas);
 	}
 	rcu_read_unlock();
-	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+	XA_BUG_ON(xa, count != order_limit);

 	xa_destroy(xa);
mm/hugetlb.c

@@ -3145,7 +3145,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)

 	/* do node specific alloc */
 	if (nid != NUMA_NO_NODE) {
-		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
+		m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h),
 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 		if (!m)
 			return 0;
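The one-word change carries the whole fix: the "try nid" variant silently falls back to other nodes when the requested node cannot satisfy the allocation, while the "exact nid" variant fails instead, which is what a node-specific 1G page reservation needs. A toy model of the two contracts (the helpers below are illustrative stand-ins, not the memblock API):

#include <stdio.h>

#define NR_NODES 2
static int node_free[NR_NODES] = { 0, 1 };      /* node 0 is exhausted */

static int alloc_exact_nid(int nid)
{
        if (node_free[nid] == 0)
                return -1;      /* fail: the caller asked for THIS node */
        node_free[nid]--;
        return nid;
}

static int alloc_try_nid(int nid)
{
        int got = alloc_exact_nid(nid);

        /* fallback: satisfy the request from any other node */
        for (int n = 0; got < 0 && n < NR_NODES; n++)
                got = alloc_exact_nid(n);
        return got;
}

int main(void)
{
        /* "try" hides the shortage by handing out node 1's memory ... */
        printf("try nid 0   -> %d\n", alloc_try_nid(0));
        /* ... "exact" surfaces it, so the caller can report the failure */
        printf("exact nid 0 -> %d\n", alloc_exact_nid(0));
        return 0;
}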
mm/kasan/report.c

@@ -370,6 +370,36 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }

+/*
+ * This function is invoked with report_lock (a raw_spinlock) held. A
+ * PREEMPT_RT kernel cannot call find_vm_area() as it will acquire a sleeping
+ * rt_spinlock.
+ *
+ * For !RT kernel, the PROVE_RAW_LOCK_NESTING config option will print a
+ * lockdep warning for this raw_spinlock -> spinlock dependency. This config
+ * option is enabled by default to ensure better test coverage to expose this
+ * kind of RT kernel problem. This lockdep splat, however, can be suppressed
+ * by using DEFINE_WAIT_OVERRIDE_MAP() if it serves a useful purpose and the
+ * invalid PREEMPT_RT case has been taken care of.
+ */
+static inline struct vm_struct *kasan_find_vm_area(void *addr)
+{
+	static DEFINE_WAIT_OVERRIDE_MAP(vmalloc_map, LD_WAIT_SLEEP);
+	struct vm_struct *va;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		return NULL;
+
+	/*
+	 * Suppress lockdep warning and fetch vmalloc area of the
+	 * offending address.
+	 */
+	lock_map_acquire_try(&vmalloc_map);
+	va = find_vm_area(addr);
+	lock_map_release(&vmalloc_map);
+	return va;
+}
+
 static void print_address_description(void *addr, u8 tag,
 				      struct kasan_report_info *info)
 {
@@ -399,7 +429,7 @@ static void print_address_description(void *addr, u8 tag,
 	}

 	if (is_vmalloc_addr(addr)) {
-		struct vm_struct *va = find_vm_area(addr);
+		struct vm_struct *va = kasan_find_vm_area(addr);

 		if (va) {
 			pr_err("The buggy address belongs to the virtual mapping at\n"
@@ -409,6 +439,8 @@ static void print_address_description(void *addr, u8 tag,
 			pr_err("\n");

 			page = vmalloc_to_page(addr);
+		} else {
+			pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
 		}
 	}

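The shape of the fix is a guard helper: in a context where the lookup would be invalid (PREEMPT_RT, where find_vm_area() takes a sleeping lock under a raw spinlock), it returns NULL, and the caller falls back to a degraded but still useful report, the new else branch above. A userspace analog of that shape, with CANNOT_SLEEP standing in for IS_ENABLED(CONFIG_PREEMPT_RT) and lookup_region() invented for illustration:

#include <stdio.h>

#ifndef CANNOT_SLEEP
#define CANNOT_SLEEP 0  /* analog of IS_ENABLED(CONFIG_PREEMPT_RT) */
#endif

struct region { const char *name; };

static struct region *lookup_region(void *addr)
{
        static struct region r = { "demo mapping" };

        (void)addr;
        return &r;      /* pretend this takes a sleeping lock */
}

static struct region *guarded_lookup(void *addr)
{
        if (CANNOT_SLEEP)
                return NULL;    /* the full lookup is forbidden here */
        return lookup_region(addr);
}

int main(void)
{
        int x;
        struct region *r = guarded_lookup(&x);

        if (r)
                printf("address %p belongs to %s\n", (void *)&x, r->name);
        else    /* degraded output, mirroring the new else branch */
                printf("address %p belongs to some mapping\n", (void *)&x);
        return 0;
}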
mm/madvise.c (11 changed lines)

@@ -933,7 +933,16 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
 		 */
 		end = vma->vm_end;
 	}
-	VM_WARN_ON(start >= end);
+
+	/*
+	 * If the memory region between start and end was
+	 * originally backed by 4kB pages and then remapped to
+	 * be backed by hugepages while mmap_lock was dropped,
+	 * the adjustment for hugetlb vma above may have rounded
+	 * end down to the start address.
+	 */
+	if (start == end)
+		return 0;
+	VM_WARN_ON(start > end);
 }

 if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
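The scenario in the new comment is easy to reproduce numerically: the hugetlb adjustment rounds end down to a huge page boundary, and if the requested range sits inside a single huge page, the rounded end lands exactly on start. A standalone demonstration of the arithmetic (2 MiB huge pages are assumed purely for illustration):

#include <stdio.h>

#define HPAGE_SIZE (2UL << 20)  /* 2 MiB, an assumption for the demo */
#define ALIGN_DOWN(x) ((x) & ~(HPAGE_SIZE - 1))

int main(void)
{
        unsigned long start = 0x200000UL;          /* huge-page aligned   */
        unsigned long end   = 0x200000UL + 0x1000; /* 4 KiB into the page */

        end = ALIGN_DOWN(end);  /* analog of the hugetlb end adjustment */

        printf("start=%#lx end=%#lx\n", start, end);
        if (start == end)
                printf("0-length range: return early instead of warning\n");
        return 0;
}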
mm/memcontrol.c

@@ -4166,6 +4166,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
 		memcg_memory_event(memcg, MEMCG_OOM);
 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
 			break;
+		cond_resched();
 	}

 	memcg_wb_domain_size_changed(memcg);
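The dead loop arose because the writer could keep invoking the OOM killer without ever letting the chosen victim run and release its charge; cond_resched() inserts the missing scheduling point. A userspace analog where a retry loop depends on another thread's progress (build with -pthread; the scenario is invented for illustration):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long usage = 100;         /* analog of the memcg usage */

static void *victim(void *arg)
{
        (void)arg;
        atomic_store(&usage, 0);        /* "exits", dropping its charge */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, victim, NULL);
        while (atomic_load(&usage) > 50) {
                /* analog of cond_resched(): without it, a busy CPU
                 * could spin here forever and starve the victim */
                sched_yield();
        }
        pthread_join(t, NULL);
        printf("limit enforced, usage=%ld\n", atomic_load(&usage));
        return 0;
}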
mm/memory.c (17 changed lines)

@@ -1719,7 +1719,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pmd_t pmdval;
 	unsigned long start = addr;
 	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
-	bool direct_reclaim = false;
+	bool direct_reclaim = true;
 	int nr;

retry:
@@ -1734,8 +1734,10 @@ retry:
 	do {
 		bool any_skipped = false;

-		if (need_resched())
+		if (need_resched()) {
+			direct_reclaim = false;
 			break;
+		}

 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
 				      &force_flush, &force_break, &any_skipped);
@@ -1743,11 +1745,20 @@ retry:
 			can_reclaim_pt = false;
 		if (unlikely(force_break)) {
 			addr += nr * PAGE_SIZE;
+			direct_reclaim = false;
 			break;
 		}
 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);

-	if (can_reclaim_pt && addr == end)
+	/*
+	 * Fast path: try to hold the pmd lock and unmap the PTE page.
+	 *
+	 * If the pte lock was released midway (retry case), or if the attempt
+	 * to hold the pmd lock failed, then we need to recheck all pte entries
+	 * to ensure they are still none, thereby preventing the pte entries
+	 * from being repopulated by another thread.
+	 */
+	if (can_reclaim_pt && direct_reclaim && addr == end)
 		direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);

 	add_mm_rss_vec(mm, rss);
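The fix adopts an optimistic-flag pattern: assume the scan was complete (direct_reclaim = true), revoke the assumption on any early break, and let the slow path re-verify what the fast path may have missed. A compact sketch of that pattern under invented names:

#include <stdbool.h>
#include <stdio.h>

/* Scan all entries; clear *scan_complete on ANY early exit. */
static void scan_all(const int *entries, int n, bool *scan_complete)
{
        *scan_complete = true;          /* optimistic */
        for (int i = 0; i < n; i++) {
                if (entries[i] < 0) {   /* analog of need_resched()/force_break */
                        *scan_complete = false;
                        return;
                }
        }
}

static bool can_free_table(const int *entries, int n, bool scan_complete)
{
        if (scan_complete)
                return true;            /* fast path: nothing was skipped */
        for (int i = 0; i < n; i++)     /* slow path: recheck every entry */
                if (entries[i] != 0)
                        return false;
        return true;
}

int main(void)
{
        int entries[] = { 0, 0, -1, 0 };
        bool complete;

        scan_all(entries, 4, &complete);
        printf("can free: %d\n", can_free_table(entries, 4, complete));
        return 0;
}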
mm/migrate_device.c

@@ -840,20 +840,15 @@ void migrate_device_finalize(unsigned long *src_pfns,
 			dst = src;
 		}

+		if (!folio_is_zone_device(dst))
+			folio_add_lru(dst);
 		remove_migration_ptes(src, dst, 0);
 		folio_unlock(src);
-
-		if (folio_is_zone_device(src))
-			folio_put(src);
-		else
-			folio_putback_lru(src);
+		folio_put(src);

 		if (dst != src) {
 			folio_unlock(dst);
-			if (folio_is_zone_device(dst))
-				folio_put(dst);
-			else
-				folio_putback_lru(dst);
+			folio_put(dst);
 		}
 	}
 }
mm/zswap.c (35 changed lines)

@@ -1445,9 +1445,9 @@ resched:
 * main API
 **********************************/

-static ssize_t zswap_store_page(struct page *page,
-				struct obj_cgroup *objcg,
-				struct zswap_pool *pool)
+static bool zswap_store_page(struct page *page,
+			     struct obj_cgroup *objcg,
+			     struct zswap_pool *pool)
 {
 	swp_entry_t page_swpentry = page_swap_entry(page);
 	struct zswap_entry *entry, *old;
@@ -1456,7 +1456,7 @@ static ssize_t zswap_store_page(struct page *page,
 	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
 	if (!entry) {
 		zswap_reject_kmemcache_fail++;
-		return -EINVAL;
+		return false;
 	}

 	if (!zswap_compress(page, entry, pool))
@@ -1483,13 +1483,17 @@ static ssize_t zswap_store_page(struct page *page,

 	/*
 	 * The entry is successfully compressed and stored in the tree, there is
-	 * no further possibility of failure. Grab refs to the pool and objcg.
-	 * These refs will be dropped by zswap_entry_free() when the entry is
-	 * removed from the tree.
+	 * no further possibility of failure. Grab refs to the pool and objcg,
+	 * charge zswap memory, and increment zswap_stored_pages.
+	 * The opposite actions will be performed by zswap_entry_free()
+	 * when the entry is removed from the tree.
 	 */
 	zswap_pool_get(pool);
-	if (objcg)
+	if (objcg) {
 		obj_cgroup_get(objcg);
+		obj_cgroup_charge_zswap(objcg, entry->length);
+	}
+	atomic_long_inc(&zswap_stored_pages);

 	/*
 	 * We finish initializing the entry while it's already in xarray.
@@ -1510,13 +1514,13 @@ static ssize_t zswap_store_page(struct page *page,
 		zswap_lru_add(&zswap_list_lru, entry);
 	}

-	return entry->length;
+	return true;

store_failed:
 	zpool_free(pool->zpool, entry->handle);
compress_failed:
 	zswap_entry_cache_free(entry);
-	return -EINVAL;
+	return false;
 }

 bool zswap_store(struct folio *folio)
@@ -1526,7 +1530,6 @@ bool zswap_store(struct folio *folio)
 	struct obj_cgroup *objcg = NULL;
 	struct mem_cgroup *memcg = NULL;
 	struct zswap_pool *pool;
-	size_t compressed_bytes = 0;
 	bool ret = false;
 	long index;

@@ -1564,20 +1567,14 @@ bool zswap_store(struct folio *folio)

 	for (index = 0; index < nr_pages; ++index) {
 		struct page *page = folio_page(folio, index);
-		ssize_t bytes;

-		bytes = zswap_store_page(page, objcg, pool);
-		if (bytes < 0)
+		if (!zswap_store_page(page, objcg, pool))
 			goto put_pool;
-		compressed_bytes += bytes;
 	}

-	if (objcg) {
-		obj_cgroup_charge_zswap(objcg, compressed_bytes);
+	if (objcg)
 		count_objcg_events(objcg, ZSWPOUT, nr_pages);
-	}

-	atomic_long_add(nr_pages, &zswap_stored_pages);
 	count_vm_events(ZSWPOUT, nr_pages);

 	ret = true;
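The inconsistency came from split accounting: pages were charged and counted by the caller only after the whole loop succeeded, so a failure partway through left entries visible in the tree that were never charged. Moving the charge and counter update into the per-page store, at the moment the entry becomes visible, keeps the books balanced no matter where the batch stops. A toy model of the two-sided invariant (all names invented):

#include <stdbool.h>
#include <stdio.h>

static long charged;            /* analog of the objcg zswap charge */
static long stored_pages;       /* analog of zswap_stored_pages     */

static bool store_page(int page, long nbytes)
{
        if (page == 2)          /* simulate a mid-batch compression failure */
                return false;
        /* the entry is now visible: account for it immediately */
        charged += nbytes;
        stored_pages++;
        return true;
}

int main(void)
{
        for (int page = 0; page < 4; page++)
                if (!store_page(page, 4096))
                        break;  /* earlier pages stay stored AND charged */

        /* charges always match the entries actually stored: 2 and 8192 */
        printf("stored=%ld charged=%ld\n", stored_pages, charged);
        return 0;
}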
tools/accounting/getdelays.c

@@ -196,22 +196,22 @@ static int get_family_id(int sd)

 static void print_delayacct(struct taskstats *t)
 {
-	printf("\n\nCPU   %15s%15s%15s%15s%15s%15s\n"
-	       "      %15llu%15llu%15llu%15llu%15.3fms%13.6fms\n"
-	       "IO    %15s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n"
-	       "SWAP  %15s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n"
-	       "RECLAIM  %12s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n"
-	       "THRASHING%12s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n"
-	       "COMPACT  %12s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n"
-	       "WPCOPY   %12s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n"
-	       "IRQ   %15s%15s%15s%15s\n"
-	       "      %15llu%15llu%15.3fms%13.6fms\n",
+	printf("\n\nCPU   %15s%15s%15s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "IO    %15s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "SWAP  %15s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "RECLAIM  %12s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "THRASHING%12s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "COMPACT  %12s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "WPCOPY   %12s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+	       "IRQ   %15s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15.3fms%13.6fms%13.6fms\n",
 	       "count", "real total", "virtual total",
 	       "delay total", "delay average", "delay max", "delay min",
 	       (unsigned long long)t->cpu_count,
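The old format strings carried fewer conversions than arguments: the argument list already supplied "delay max" and "delay min", but, for example, the IRQ header row only had four %15s slots, so the extra column printed misaligned or not at all. The invariant is simply one conversion per column in both the header and data rows, as in this minimal compilable illustration with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long long count = 42;
        double delay_max_ms = 1.25, delay_min_ms = 0.003;

        /* three %15s headers require three matching conversions below */
        printf("CPU %15s%15s%15s\n"
               "    %15llu%13.6fms%13.6fms\n",
               "count", "delay max", "delay min",
               count, delay_max_ms, delay_min_ms);
        return 0;
}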
tools/mm: fix build warnings with musl-libc (source file name not preserved in this rendering)

@@ -24,8 +24,8 @@
 #include <signal.h>
 #include <inttypes.h>
 #include <sys/types.h>
-#include <sys/errno.h>
-#include <sys/fcntl.h>
+#include <errno.h>
+#include <fcntl.h>
 #include <sys/mount.h>
 #include <sys/statfs.h>
 #include <sys/mman.h>
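<sys/errno.h> and <sys/fcntl.h> are legacy compatibility shims; musl emits a #warning for them, which warning-as-error builds turn fatal. The standard spellings compile cleanly on both glibc and musl:

#include <errno.h>      /* standard location, not <sys/errno.h> */
#include <fcntl.h>      /* standard location, not <sys/fcntl.h> */
#include <stdio.h>

int main(void)
{
        int fd = open("/nonexistent", O_RDONLY);

        if (fd < 0)
                printf("open failed: errno=%d\n", errno);
        return 0;
}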
tools/testing/selftests/mm/run_vmtests.sh

@@ -220,7 +220,7 @@ run_test() {
	if test_selected ${CATEGORY}; then
		# On memory constrainted systems some tests can fail to allocate hugepages.
		# perform some cleanup before the test for a higher success rate.
-		if [ ${CATEGORY} == "thp" ] | [ ${CATEGORY} == "hugetlb" ]; then
+		if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then
			echo 3 > /proc/sys/vm/drop_caches
			sleep 2
			echo 1 > /proc/sys/vm/compact_memory