An unintended consequence of commit 9c573cd313 ("randomize_kstack:
Improve entropy diffusion") was that the per-architecture entropy size
filtering reduced how many bits were being added to the mix, rather
than how many bits were being used during the offsetting. All
architectures fell back to the existing default of 0x3FF (10 bits),
which will consume at most 1KiB of stack space. It seems that this is
working just fine, so let's avoid the confusion and update everything
to use the default.

The prior intent of the per-architecture limits was:

  arm64:   capped at 0x1FF (9 bits), 5 bits effective
  powerpc: uncapped (10 bits), 6 or 7 bits effective
  riscv:   uncapped (10 bits), 6 bits effective
  x86:     capped at 0xFF (8 bits), 5 (x86_64) or 6 (ia32) bits effective
  s390:    capped at 0xFF (8 bits), undocumented effective entropy

Current discussion has led to just dropping the original
per-architecture filters. The additional entropy appears to be safe for
arm64, x86, and s390. Quoting Arnd, "There is no point pretending that
15.75KB is somehow safe to use while 15.00KB is not."

Co-developed-by: Yuntao Liu <liuyuntao12@huawei.com>
Signed-off-by: Yuntao Liu <liuyuntao12@huawei.com>
Fixes: 9c573cd313 ("randomize_kstack: Improve entropy diffusion")
Link: https://lore.kernel.org/r/20240617133721.377540-1-liuyuntao12@huawei.com
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com> # s390
Link: https://lore.kernel.org/r/20240619214711.work.953-kees@kernel.org
Signed-off-by: Kees Cook <kees@kernel.org>
arch/s390/include/asm/entry-common.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_S390_ENTRY_COMMON_H
#define ARCH_S390_ENTRY_COMMON_H

#include <linux/sched.h>
#include <linux/audit.h>
#include <linux/randomize_kstack.h>
#include <linux/processor.h>
#include <linux/uaccess.h>
#include <asm/timex.h>
#include <asm/fpu.h>
#include <asm/pai.h>

#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)

void do_per_trap(struct pt_regs *regs);

static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		debug_user_asce(0);

	pai_kernel_enter(regs);
}

#define arch_enter_from_user_mode arch_enter_from_user_mode

static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
							unsigned long ti_work)
{
	if (ti_work & _TIF_PER_TRAP) {
		clear_thread_flag(TIF_PER_TRAP);
		do_per_trap(regs);
	}

	if (ti_work & _TIF_GUARDED_STORAGE)
		gs_load_bc_cb(regs);
}

#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work

static __always_inline void arch_exit_to_user_mode(void)
{
	load_user_fpu_regs();

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		debug_user_asce(1);

	pai_kernel_exit(current_pt_regs());
}

#define arch_exit_to_user_mode arch_exit_to_user_mode

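/*
 * Feed the unmasked TOD clock value into the kstack offset entropy
 * mix; the usable bits are capped to the generic 0x3FF (10 bit)
 * default when the offset is applied, not here.
 */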
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
	choose_random_kstack_offset(get_tod_clock_fast());
}

#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

#endif
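For context, the entropy mixed in above is consumed on the next syscall
entry. A minimal sketch of that consumer side, assuming the upstream
structure of the s390 syscall handler in arch/s390/kernel/syscall.c:

void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
	/* Apply the accumulated offset, masked to 0x3FF (10 bits). */
	add_random_kstack_offset();

	/* ... regular syscall entry work and dispatch ... */
}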