With LPAE enabled, privileged no-access cannot be enforced using CPU
domains, as that feature is not available. This patch implements PAN by
disabling TTBR0 page table walks while in kernel mode.

The ARM architecture allows page table walks to be split between TTBR0
and TTBR1. With LPAE enabled, the split is defined by a combination of
the TTBCR T0SZ and T1SZ bits. Currently, an LPAE-enabled kernel uses
TTBR0 for user addresses and TTBR1 for kernel addresses with the
VMSPLIT_2G and VMSPLIT_3G configurations. The main advantage of the 3:1
split is that TTBR1 is reduced to 2 levels, so TLB refill is potentially
faster (though usually the first level entries are already cached in
the TLB).

The PAN support on LPAE-enabled kernels uses TTBR0 when running in user
space or in kernel space during user access routines (TTBCR T0SZ and
T1SZ are both 0). When user accesses are disabled in kernel mode, TTBR0
page table walks are disabled by setting TTBCR.EPD0. TTBR1 is used for
kernel accesses (including loadable modules; anything covered by
swapper_pg_dir) and the TTBR0 range is shrunk to the minimum
(T0SZ = 7, i.e. 2^(32-7) = 32MB). To avoid user accesses potentially
hitting stale TLB entries, the ASID is switched to 0 (reserved) by
setting TTBCR.A1 and using the ASID value in TTBR1. The difference from
a non-PAN kernel is that with the 3:1 memory split, TTBR1 always uses 3
levels of page tables.

As part of the change we use preprocessor elif defined() clauses, so
balance these clauses by converting the relevant preceding ifdef
clauses to if defined() clauses.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
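As a rough sketch of the mechanism described above (the helper names
are hypothetical, not the patch's actual code; bit positions are
assumed from the ARMv7-A LPAE TTBCR layout, with the register accessed
through CP15 c2, opcode2 2), the switch could look like:

	/* Hypothetical helpers sketching the LPAE PAN switch; assumed
	 * TTBCR layout: T0SZ in bits [2:0], EPD0 bit 7, A1 bit 22. */
	#define TTBCR_T0SZ_MASK	7		/* TTBR0 range: 2^(32 - T0SZ) */
	#define TTBCR_T0SZ_MAX	7		/* 2^(32 - 7) = 32MB minimum  */
	#define TTBCR_EPD0	(1 << 7)	/* disable TTBR0 table walks  */
	#define TTBCR_A1	(1 << 22)	/* take the ASID from TTBR1   */

	static inline unsigned int ttbcr_read(void)
	{
		unsigned int ttbcr;

		asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
		return ttbcr;
	}

	static inline void ttbcr_write(unsigned int ttbcr)
	{
		asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
		asm volatile("isb" : : : "memory");
	}

	/* Kernel mode, user accesses disabled: no TTBR0 walks, TTBR0
	 * range shrunk to 32MB, reserved ASID 0 taken from TTBR1. */
	static inline void pan_uaccess_disable(void)
	{
		ttbcr_write(ttbcr_read() | TTBCR_EPD0 | TTBCR_T0SZ_MAX | TTBCR_A1);
	}

	/* User space or kernel user-access routines: TTBR0 walks
	 * enabled, T0SZ back to 0, ASID taken from TTBR0 again. */
	static inline void pan_uaccess_enable(void)
	{
		ttbcr_write(ttbcr_read() & ~(TTBCR_EPD0 | TTBCR_T0SZ_MASK | TTBCR_A1));
	}

In the file below, the visible part of this work is the ttbcr field
added to struct svc_pt_regs so the state survives exceptions.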
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/ptrace.h
 *
 *  Copyright (C) 1996-2003 Russell King
 */
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H

#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/types.h>

struct pt_regs {
	unsigned long uregs[18];
};

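/*
 * Extra state saved alongside pt_regs for SVC-mode exceptions: DACR
 * carries the CPU-domain based uaccess state on the classic MMU, and
 * TTBCR carries the TTBR0 walk-disable (PAN) state on LPAE, so both
 * can be restored on exception return.
 */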
struct svc_pt_regs {
	struct pt_regs regs;
	u32 dacr;
	u32 ttbcr;
};

#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)

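/* The low nibble of the mode field is 0 for both USR_MODE (0x10) and
 * USR26_MODE (0x00), so masking with 0xf catches both. */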
#define user_mode(regs)	\
	(((regs)->ARM_cpsr & 0xf) == 0)

#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
	(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif

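/* (CPSR.J << 1) | CPSR.T selects the instruction set: 0 = ARM,
 * 1 = Thumb, 2 = Jazelle, 3 = ThumbEE. V7M parts are Thumb-only. */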
#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
	(FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
	 FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif

#define processor_mode(regs) \
	((regs)->ARM_cpsr & MODE_MASK)

#define interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_I_BIT))

#define fast_interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_F_BIT))

/* Are the current registers suitable for user mode?
 * (used to maintain security in signal handlers)
 */
static inline int valid_user_regs(struct pt_regs *regs)
{
#ifndef CONFIG_CPU_V7M
	unsigned long mode = regs->ARM_cpsr & MODE_MASK;

	/*
	 * Always clear the F (FIQ) and A (delayed abort) bits
	 */
	regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);

	if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
		if (mode == USR_MODE)
			return 1;
		if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
			return 1;
	}

	/*
	 * Force CPSR to something logical...
	 */
	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
	if (!(elf_hwcap & HWCAP_26BIT))
		regs->ARM_cpsr |= USR_MODE;

	return 0;
#else /* ifndef CONFIG_CPU_V7M */
	return 1;
#endif
}

static inline long regs_return_value(struct pt_regs *regs)
{
	return regs->ARM_r0;
}

#define instruction_pointer(regs)	(regs)->ARM_pc

#ifdef CONFIG_THUMB2_KERNEL
#define frame_pointer(regs) (regs)->ARM_r7
#else
#define frame_pointer(regs) (regs)->ARM_fp
#endif

static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	instruction_pointer(regs) = val;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

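/* The top four bits of an ARM instruction hold its condition code;
 * condition 0xe (AL) executes unconditionally. */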
#define predicate(x)		((x) & 0xf0000000)
#define PREDICATE_ALWAYS	0xe0000000

/*
 * True if instr is a 32-bit thumb instruction. This works if instr
 * is the first or only half-word of a thumb instruction. It also works
 * when instr holds all 32-bits of a wide thumb instruction if stored
 * in the form (first_half<<16)|(second_half)
 */
#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)

/*
 * kprobe-based event tracer support
 */
#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))

extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned int n);

/**
 * regs_get_register() - get register value from its offset
 * @regs: pt_regs from which register value is gotten
 * @offset: offset number of the register.
 *
 * regs_get_register returns the value of the register at @offset from
 * @regs. The @offset is the offset of the register in struct pt_regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}

/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->ARM_sp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->ARM_sp;
}

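/*
 * pt_regs of the current thread sit at the top of its THREAD_SIZE
 * aligned kernel stack: round sp up to the last byte of the stack,
 * step back over the 8 bytes kept free at the very top, then back
 * over one struct pt_regs.
 */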
#define current_pt_regs(void) ({ (struct pt_regs *)			\
		((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;	\
})

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->ARM_r0 = rc;
}

/*
 * Update ITSTATE after normal execution of an IT block instruction.
 *
 * The 8 IT state bits are split into two parts in CPSR:
 *	ITSTATE<1:0> are in CPSR<26:25>
 *	ITSTATE<7:2> are in CPSR<15:10>
 */
static inline unsigned long it_advance(unsigned long cpsr)
{
	if ((cpsr & 0x06000400) == 0) {
		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
		cpsr &= ~PSR_IT_MASK;
	} else {
		/* We need to shift left ITSTATE<4:0> */
		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
		unsigned long it = cpsr & mask;

		it <<= 1;
		it |= it >> (27 - 10);	/* Carry ITSTATE<2> to correct place */
		it &= mask;
		cpsr &= ~mask;
		cpsr |= it;
	}
	return cpsr;
}

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif