We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates riscv to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-29-mark.rutland@arm.com
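The "instrumented wrappers" mentioned above are the generic xchg()/cmpxchg()
and atomic_*() entry points that common code layers on top of the arch_*()
primitives. A minimal sketch of that pattern, simplified from the kernel's
generated atomic-instrumented.h (the exact instrumentation hooks vary by
kernel version, so treat the details here as illustrative):

#define cmpxchg(ptr, ...) \
({ \
        typeof(ptr) __ai_ptr = (ptr); \
        /* report the atomic RMW access to KASAN/KCSAN first ... */ \
        instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
        /* ... then defer to the arch-provided primitive below */ \
        arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})

The resulting arch/riscv/include/asm/cmpxchg.h: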
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>
#include <asm/fence.h>

#define __xchg_relaxed(ptr, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(new) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        " amoswap.w %0, %2, %1\n" \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        " amoswap.d %0, %2, %1\n" \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_xchg_relaxed(ptr, x) \
({ \
        __typeof__(*(ptr)) _x_ = (x); \
        (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
                                            _x_, sizeof(*(ptr))); \
})

#define __xchg_acquire(ptr, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(new) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        " amoswap.w %0, %2, %1\n" \
                        RISCV_ACQUIRE_BARRIER \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        " amoswap.d %0, %2, %1\n" \
                        RISCV_ACQUIRE_BARRIER \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_xchg_acquire(ptr, x) \
({ \
        __typeof__(*(ptr)) _x_ = (x); \
        (__typeof__(*(ptr))) __xchg_acquire((ptr), \
                                            _x_, sizeof(*(ptr))); \
})

#define __xchg_release(ptr, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(new) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        RISCV_RELEASE_BARRIER \
                        " amoswap.w %0, %2, %1\n" \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        RISCV_RELEASE_BARRIER \
                        " amoswap.d %0, %2, %1\n" \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_xchg_release(ptr, x) \
({ \
        __typeof__(*(ptr)) _x_ = (x); \
        (__typeof__(*(ptr))) __xchg_release((ptr), \
                                            _x_, sizeof(*(ptr))); \
})

#define __xchg(ptr, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(new) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        " amoswap.w.aqrl %0, %2, %1\n" \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        " amoswap.d.aqrl %0, %2, %1\n" \
                        : "=r" (__ret), "+A" (*__ptr) \
                        : "r" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_xchg(ptr, x) \
({ \
        __typeof__(*(ptr)) _x_ = (x); \
        (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
})

#define xchg32(ptr, x) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
        arch_xchg((ptr), (x)); \
})

#define xchg64(ptr, x) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        arch_xchg((ptr), (x)); \
})

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __cmpxchg_relaxed(ptr, old, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        register unsigned int __rc; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        "0: lr.w %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.w %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" ((long)__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        "0: lr.d %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.d %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" (__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
        __typeof__(*(ptr)) _o_ = (o); \
        __typeof__(*(ptr)) _n_ = (n); \
        (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
                                        _o_, _n_, sizeof(*(ptr))); \
})

#define __cmpxchg_acquire(ptr, old, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        register unsigned int __rc; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        "0: lr.w %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.w %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        RISCV_ACQUIRE_BARRIER \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" ((long)__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        "0: lr.d %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.d %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        RISCV_ACQUIRE_BARRIER \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" (__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
        __typeof__(*(ptr)) _o_ = (o); \
        __typeof__(*(ptr)) _n_ = (n); \
        (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
                                        _o_, _n_, sizeof(*(ptr))); \
})

#define __cmpxchg_release(ptr, old, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        register unsigned int __rc; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        RISCV_RELEASE_BARRIER \
                        "0: lr.w %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.w %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" ((long)__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        RISCV_RELEASE_BARRIER \
                        "0: lr.d %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.d %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" (__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_cmpxchg_release(ptr, o, n) \
({ \
        __typeof__(*(ptr)) _o_ = (o); \
        __typeof__(*(ptr)) _n_ = (n); \
        (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
                                        _o_, _n_, sizeof(*(ptr))); \
})

#define __cmpxchg(ptr, old, new, size) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        __typeof__(*(ptr)) __ret; \
        register unsigned int __rc; \
        switch (size) { \
        case 4: \
                __asm__ __volatile__ ( \
                        "0: lr.w %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.w.rl %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        " fence rw, rw\n" \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" ((long)__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        case 8: \
                __asm__ __volatile__ ( \
                        "0: lr.d %0, %2\n" \
                        " bne %0, %z3, 1f\n" \
                        " sc.d.rl %1, %z4, %2\n" \
                        " bnez %1, 0b\n" \
                        " fence rw, rw\n" \
                        "1:\n" \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
                        : "rJ" (__old), "rJ" (__new) \
                        : "memory"); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        __ret; \
})

#define arch_cmpxchg(ptr, o, n) \
({ \
        __typeof__(*(ptr)) _o_ = (o); \
        __typeof__(*(ptr)) _n_ = (n); \
        (__typeof__(*(ptr))) __cmpxchg((ptr), \
                                       _o_, _n_, sizeof(*(ptr))); \
})

#define arch_cmpxchg_local(ptr, o, n) \
        (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))

#define cmpxchg32(ptr, o, n) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
        arch_cmpxchg((ptr), (o), (n)); \
})

#define cmpxchg32_local(ptr, o, n) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
        arch_cmpxchg_relaxed((ptr), (o), (n)); \
})

#define arch_cmpxchg64(ptr, o, n) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        arch_cmpxchg((ptr), (o), (n)); \
})

#define arch_cmpxchg64_local(ptr, o, n) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        arch_cmpxchg_relaxed((ptr), (o), (n)); \
})

#endif /* _ASM_RISCV_CMPXCHG_H */
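For reference, the RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER strings
spliced into the _acquire/_release variants above come from asm/fence.h and
expand to "fence r, rw" and "fence rw, w" instructions respectively, while
the fully ordered variants rely on .aqrl AMO annotations or an sc.rl plus a
trailing "fence rw, rw". Callers consume the contract documented above the
cmpxchg family (the exchange happened iff the returned value equals the
expected old one) with a retry loop; a hypothetical caller-side sketch, not
part of this patch:

/*
 * Hypothetical illustration only (not from this patch): a bounded
 * lock-free increment built on the "compare RETURN with OLD" rule.
 */
static bool inc_below(int *counter, int limit)
{
        int old = READ_ONCE(*counter);

        while (old < limit) {
                int seen = cmpxchg(counter, old, old + 1);

                if (seen == old)
                        return true;    /* exchange happened; we won the race */
                old = seen;             /* lost a race; retry from the fresh value */
        }
        return false;                   /* counter already at or above the limit */
}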