locking/rwsem: Fold __down_{read,write}*()
There's a lot of needless duplication in __down_{read,write}*(); cure that with a helper. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20201207090243.GE3040@hirez.programming.kicks-ass.net
This commit is contained in:
parent
285c61aedf
commit
c995e638cc
1 changed file with 23 additions and 22 deletions
|
@ -1354,32 +1354,29 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
|
||||||
/*
|
/*
|
||||||
* lock for reading
|
* lock for reading
|
||||||
*/
|
*/
|
||||||
static inline void __down_read(struct rw_semaphore *sem)
|
static inline int __down_read_common(struct rw_semaphore *sem, int state)
|
||||||
{
|
{
|
||||||
if (!rwsem_read_trylock(sem)) {
|
if (!rwsem_read_trylock(sem)) {
|
||||||
rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
|
if (IS_ERR(rwsem_down_read_slowpath(sem, state)))
|
||||||
|
return -EINTR;
|
||||||
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
|
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
|
||||||
}
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void __down_read(struct rw_semaphore *sem)
|
||||||
|
{
|
||||||
|
__down_read_common(sem, TASK_UNINTERRUPTIBLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __down_read_interruptible(struct rw_semaphore *sem)
|
static inline int __down_read_interruptible(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
if (!rwsem_read_trylock(sem)) {
|
return __down_read_common(sem, TASK_INTERRUPTIBLE);
|
||||||
if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
|
|
||||||
return -EINTR;
|
|
||||||
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __down_read_killable(struct rw_semaphore *sem)
|
static inline int __down_read_killable(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
if (!rwsem_read_trylock(sem)) {
|
return __down_read_common(sem, TASK_KILLABLE);
|
||||||
if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
|
|
||||||
return -EINTR;
|
|
||||||
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
||||||
|
@ -1405,22 +1402,26 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
|
||||||
/*
|
/*
|
||||||
* lock for writing
|
* lock for writing
|
||||||
*/
|
*/
|
||||||
static inline void __down_write(struct rw_semaphore *sem)
|
static inline int __down_write_common(struct rw_semaphore *sem, int state)
|
||||||
{
|
|
||||||
if (unlikely(!rwsem_write_trylock(sem)))
|
|
||||||
rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int __down_write_killable(struct rw_semaphore *sem)
|
|
||||||
{
|
{
|
||||||
if (unlikely(!rwsem_write_trylock(sem))) {
|
if (unlikely(!rwsem_write_trylock(sem))) {
|
||||||
if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
|
if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
|
||||||
return -EINTR;
|
return -EINTR;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void __down_write(struct rw_semaphore *sem)
|
||||||
|
{
|
||||||
|
__down_write_common(sem, TASK_UNINTERRUPTIBLE);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int __down_write_killable(struct rw_semaphore *sem)
|
||||||
|
{
|
||||||
|
return __down_write_common(sem, TASK_KILLABLE);
|
||||||
|
}
|
||||||
|
|
||||||
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
|
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
|
||||||
|
|
Loading…
Add table
Reference in a new issue