We'd like all architectures to convert to ARCH_ATOMIC, as once all architectures are converted it will be possible to make significant cleanups to the atomics headers, and this will make it much easier to generically enable atomic functionality (e.g. debug logic in the instrumented wrappers).

As a step towards that, this patch migrates sh to ARCH_ATOMIC. The arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common code wraps these with optional instrumentation to provide the regular functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-30-mark.rutland@arm.com
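
To make the "common code wraps these" part concrete, here is a minimal sketch of the shape of those wrappers (illustrative only; the real wrappers are script-generated into asm-generic/atomic-instrumented.h, and the generated bodies may differ in detail). Each wrapper issues the KASAN/KCSAN instrumentation call and then defers to the arch_ implementation:

/*
 * Sketch of the generic instrumented wrapper layer -- illustrative,
 * not a verbatim copy of the generated header.
 */
#include <linux/compiler.h>
#include <linux/instrumented.h>
/* atomic_t and the arch_atomic_*() helpers come from <asm/atomic.h>. */

static __always_inline void
atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_fetch_add(i, v);
}

The sh arch_atomic_*() side for the gRB (gUSA roll-back) configuration lives in arch/sh/include/asm/atomic-grb.h, shown below.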
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H
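
/*
 * gRB ("gUSA roll-back") atomics, selected by CONFIG_GUSA_RB on non-SMP
 * parts: the LOGIN instruction loads r15 with the negative length of the
 * critical section (here -6 bytes: load, op, store), while r0 holds its
 * end address (label 1:) and r1 saves the real stack pointer.  If an
 * interrupt or exception is taken while r15 is negative, the exception
 * entry code restores r15 from r1 and rolls the PC back to the start of
 * the section, so the whole load/op/store sequence re-executes from
 * scratch and is therefore atomic with respect to interrupts.
 */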

#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__ ( \
		" .align 2 \n\t" \
		" mova 1f, r0 \n\t" /* r0 = end point */ \
		" mov r15, r1 \n\t" /* r1 = saved sp */ \
		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
		" mov.l @%1, %0 \n\t" /* load old value */ \
		" " #op " %2, %0 \n\t" /* $op */ \
		" mov.l %0, @%1 \n\t" /* store new value */ \
		"1: mov r1, r15 \n\t" /* LOGOUT */ \
		: "=&r" (tmp), \
		  "+r" (v) \
		: "r" (i) \
		: "memory" , "r0", "r1"); \
} \

#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__ ( \
		" .align 2 \n\t" \
		" mova 1f, r0 \n\t" /* r0 = end point */ \
		" mov r15, r1 \n\t" /* r1 = saved sp */ \
		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
		" mov.l @%1, %0 \n\t" /* load old value */ \
		" " #op " %2, %0 \n\t" /* $op */ \
		" mov.l %0, @%1 \n\t" /* store new value */ \
		"1: mov r1, r15 \n\t" /* LOGOUT */ \
		: "=&r" (tmp), \
		  "+r" (v) \
		: "r" (i) \
		: "memory" , "r0", "r1"); \
	\
	return tmp; \
}

#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int res, tmp; \
	\
	__asm__ __volatile__ ( \
		" .align 2 \n\t" \
		" mova 1f, r0 \n\t" /* r0 = end point */ \
		" mov r15, r1 \n\t" /* r1 = saved sp */ \
		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
		" mov.l @%2, %0 \n\t" /* load old value */ \
		" mov %0, %1 \n\t" /* save old value */ \
		" " #op " %3, %0 \n\t" /* $op */ \
		" mov.l %0, @%2 \n\t" /* store new value */ \
		"1: mov r1, r15 \n\t" /* LOGOUT */ \
		: "=&r" (tmp), "=&r" (res), "+r" (v) \
		: "r" (i) \
		: "memory" , "r0", "r1"); \
	\
	return res; \
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_GRB_H */
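
For completeness, a small usage sketch (a hypothetical example, not part of the patch): callers keep using the regular atomic_*() API, and on sh the instrumented wrappers plus the generic fallbacks resolve to the arch_atomic_*() and arch_xchg() helpers provided by the arch headers above.

/* Hypothetical usage sketch -- not from the patch. */
#include <linux/atomic.h>

static atomic_t nr_events = ATOMIC_INIT(0);

static void record_event(void)
{
	/* atomic_inc() goes through the instrumented wrapper and, via the
	 * generic fallbacks, ends up in arch_atomic_add() above. */
	atomic_inc(&nr_events);
}

static int drain_events(void)
{
	/* atomic_xchg() likewise resolves to the sh arch_xchg() helper. */
	return atomic_xchg(&nr_events, 0);
}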