linux/arch/parisc/include/asm/spinlock.h
Helge Deller a0f4b7879f parisc: Fix lightweight spinlock checks to not break futexes
The lightweight spinlock checks verify that a spinlock holds either the
value 0 (spinlock locked) or a value with no bits set other than those
in __ARCH_SPIN_LOCK_UNLOCKED_VAL.
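
As a rough C picture (illustrative only, not code from this patch; the
helper name is made up), the invariant being checked is:

/* A sane lock word is either 0 (locked) or carries no bits outside
 * __ARCH_SPIN_LOCK_UNLOCKED_VAL (unlocked). */
static inline int lock_word_is_sane(int lock_val)
{
	return (lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) == 0;
}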

This breaks the current LWS code, which unlocks a lock by writing the
address of the lock into the lock word; that was an optimization to
save one assembler instruction.

Fix it by making spinlock_types.h accessible to asm code, changing the
LWS spinlock-unlocking code to write __ARCH_SPIN_LOCK_UNLOCKED_VAL into
the lock word, and adding some missing lightweight spinlock checks to
the LWS path. Finally, make the spinlock checks dependent on DEBUG_KERNEL.
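
In C terms the unlock change amounts to the following sketch (the real
code is hand-written assembly on the LWS path; these helper names are
hypothetical):

/* Old LWS unlock: store the lock's own address into the lock word.
 * Any non-zero value unlocks, and reusing the address register saved
 * one instruction, but the stored value trips the lightweight checks. */
static inline void lws_unlock_old(volatile unsigned int *lock)
{
	*lock = (unsigned int)(unsigned long)lock;
}

/* New LWS unlock: store the canonical unlocked value, which the
 * lightweight checks accept. */
static inline void lws_unlock_new(volatile unsigned int *lock)
{
	*lock = __ARCH_SPIN_LOCK_UNLOCKED_VAL;
}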

Noticed-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
Tested-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # v6.4+
Fixes: 15e64ef652 ("parisc: Add lightweight spinlock checks")
2023-08-10 17:32:09 +02:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
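
/*
 * Lightweight sanity check: "andcm,=" computes
 * lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL and nullifies the following
 * word when the result is zero, so the embedded SPINLOCK_BREAK_INSN
 * traps only if the lock word carries bits outside the canonical
 * unlocked value.
 */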
static inline void arch_spin_val_check(int lock_val)
{
	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
		asm volatile(	"andcm,=	%0,%1,%%r0\n"
				".word %2\n"
		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
			"i" (SPINLOCK_BREAK_INSN));
}

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = READ_ONCE(*a);
	arch_spin_val_check(lock_val);
	return (lock_val == 0);
}
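
/*
 * __ldcw() atomically loads the lock word and stores zero to it, so a
 * non-zero old value means the lock was free and now belongs to us;
 * otherwise spin reading until the word goes non-zero, then retry.
 */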
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	do {
		int lock_val_old;

		lock_val_old = __ldcw(a);
		arch_spin_val_check(lock_val_old);
		if (lock_val_old)
			return;	/* got lock */

		/* wait until we should try to get lock again */
		while (*a == 0)
			continue;
	} while (1);
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)"
		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}
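
/* 1 - lock taken successfully (the ldcw read back a non-zero, i.e.
 * free, lock word) */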
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = __ldcw(a);
	arch_spin_val_check(lock_val);
	return lock_val != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by readers.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
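
/*
 * Counter states, as implemented below:
 *   counter == __ARCH_RW_LOCK_UNLOCKED__      unlocked
 *   0 < counter < __ARCH_RW_LOCK_UNLOCKED__   held by readers
 *   counter == 0                              held by a writer
 */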

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant the lock to the
	 * writer. Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */