linux/arch/parisc/include/asm/special_insns.h
Helge Deller 8b1d723956 parisc: Fix random data corruption from exception handler
The current exception handler implementation, which assists when accessing
user space memory, may exhibit random data corruption if the compiler decides
to use a different register than the specified register %r29 (defined in
ASM_EXCEPTIONTABLE_REG) for the error code. If the compiler chooses another
register, the fault handler will nevertheless store -EFAULT into %r29 and thus
trash whatever this register is used for.
Looking at the assembly I found that this happens sometimes in emulate_ldd().
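
For illustration only (this sketch is not part of the original commit): a
simplified fragment of the fragile pre-patch pattern, using the old
two-argument form of ASM_EXCEPTIONTABLE_ENTRY_EFAULT and a hypothetical
__user pointer uaddr. The error variable is an ordinary local, so the
compiler may place it in any register, yet the fixup always targets %r29:

	unsigned int val;
	long ret = 0;	/* error code; the compiler picks its register */

	__asm__("1:\tldw 0(%%sr3,%2),%0\n"
		"9:\n"
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)
		: "=r" (val), "+r" (ret)
		: "r" (uaddr));

	/* If 'ret' was not allocated to %r29, the fixup's -EFAULT store
	 * silently corrupts whatever %r29 currently holds. */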

To solve the issue, the easiest approach would be to somehow tell the fault
handler which register holds the error code. Using %0 or %1 in the inline
assembly is not possible, as it will show up as e.g. %r29 (with the "%r"
prefix), which the GNU assembler cannot convert to an integer.

This patch takes another, better and more flexible approach:
We extend the __ex_table (which is out of the execution path) by one 32-bit word.
In this word we tell the compiler to insert the assembler instruction
"or %r0,%r0,%reg", where %reg references the register which the compiler
chose for the error return code.
In case of an access failure, the fault handler finds the __ex_table entry and
can examine the opcode. The used register is encoded in the lowest 5 bits, and
the fault handler can then store -EFAULT into this register.
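
For illustration only (not part of the original commit), a minimal sketch of
the fixup side, assuming the parisc exception_table_entry has gained a third
word, called err_opcode here, that holds the recorded "or %r0,%r0,%reg"
instruction, and that regs->gr[] is the saved general register file:

	if (fix->err_opcode) {
		/* the "or" target register sits in the lowest 5 bits */
		unsigned int err_reg = fix->err_opcode & 0x1f;

		if (err_reg)		/* %r0 means "no error register" */
			regs->gr[err_reg] = -EFAULT;
	}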

Since we extend the __ex_table to 3 words we can't use the BUILDTIME_TABLE_SORT
config option any longer.

Signed-off-by: Helge Deller <deller@gmx.de>
Cc: <stable@vger.kernel.org> # v6.0+
2024-01-30 17:18:58 +01:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H

/*
 * Load Physical Address: translate a kernel virtual address with the LPA
 * instruction.  Yields 0 if LPA faults because no translation exists; the
 * __ex_table entry names no error register ("or %r0,%r0,%r0").
 */
#define lpa(va)	({					\
	unsigned long pa;				\
	__asm__ __volatile__(				\
		"copy %%r0,%0\n"			\
		"8:\tlpa %%r0(%1),%0\n"			\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,	\
				"or %%r0,%%r0,%%r0")	\
		: "=&r" (pa)				\
		: "r" (va)				\
		: "memory"				\
	);						\
	pa;						\
})

/*
 * As lpa(), but translate a user virtual address through space register
 * %sr3, which holds the current user space id.
 */
#define lpa_user(va)	({				\
	unsigned long pa;				\
	__asm__ __volatile__(				\
		"copy %%r0,%0\n"			\
		"8:\tlpa %%r0(%%sr3,%1),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,	\
				"or %%r0,%%r0,%%r0")	\
		: "=&r" (pa)				\
		: "r" (va)				\
		: "memory"				\
	);						\
	pa;						\
})
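
/*
 * Illustrative usage sketch (not part of this header): lpa() returns the
 * physical address backing a kernel virtual address, or 0 when the LPA
 * instruction faults because no translation exists; 'buf' is a hypothetical
 * kernel pointer:
 *
 *	unsigned long phys = lpa((unsigned long)buf);
 *
 *	if (!phys)
 *		pr_warn("no translation for %px\n", buf);
 */
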
#define CR_EIEM 15 /* External Interrupt Enable Mask */
#define CR_CR16 16 /* CR16 Interval Timer */
#define CR_EIRR 23 /* External Interrupt Request Register */

/* Read control register 'reg' (mfctl = move from control register). */
#define mfctl(reg)	({			\
	unsigned long cr;			\
	__asm__ __volatile__(			\
		"mfctl %1,%0" :			\
		"=r" (cr) : "i" (reg)		\
	);					\
	cr;					\
})

/* Write 'gr' into control register 'cr' (mtctl = move to control register). */
#define mtctl(gr, cr) \
	__asm__ __volatile__("mtctl %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")

/* Convenience accessors for the External Interrupt Enable Mask (CR_EIEM). */
#define get_eiem()	mfctl(CR_EIEM)
#define set_eiem(val)	mtctl(val, CR_EIEM)
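
/*
 * Illustrative usage sketch (not part of this header): save the external
 * interrupt enable mask, mask all external interrupts around a critical
 * region, then restore the saved mask:
 *
 *	unsigned long eiem = get_eiem();
 *
 *	set_eiem(0UL);
 *	... critical region with external interrupts masked ...
 *	set_eiem(eiem);
 */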

/* Read space register %sr'reg' (mfsp = move from space register). */
#define mfsp(reg)	({			\
	unsigned long cr;			\
	__asm__ __volatile__(			\
		"mfsp %%sr%1,%0"		\
		: "=r" (cr) : "i"(reg)		\
	);					\
	cr;					\
})

/*
 * Write 'val' into space register 'cr' (mtsp = move to space register).
 * A constant zero is taken directly from %r0 so no general register
 * needs to be loaded with the value.
 */
#define mtsp(val, cr) \
	{ if (__builtin_constant_p(val) && ((val) == 0)) \
		__asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
	else \
		__asm__ __volatile__("mtsp %0,%1" \
			: /* no outputs */ \
			: "r" (val), "i" (cr) : "memory"); }
#endif /* __PARISC_SPECIAL_INSNS_H */