The mmiowb() macro is horribly difficult to use and drivers will continue to work most of the time if they omit a call when it is required. Rather than rely on driver authors getting this right, push mmiowb() into arch_spin_unlock() for ia64. If this is deemed to be a performance issue, a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide the barrier in cases where no I/O writes were performed inside the critical section.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
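For context, the effect on the ia64 unlock path looks roughly like the sketch below. This is an illustrative simplification, not the exact ia64 ticket-lock code from the patch: the lock-word release sequence is condensed into a hypothetical release_ticket() helper, and only the placement of the barrier is the point.

/*
 * Illustrative sketch only: release_ticket() is a hypothetical stand-in
 * for the real ia64 ticket-lock release sequence.  The barrier is now
 * issued unconditionally before the lock is dropped, so MMIO writes
 * made inside the critical section cannot be reordered past the unlock
 * as observed by the next CPU to take the lock.
 */
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Could later be elided via ARCH_HAS_MMIOWB when no MMIO occurred. */
	mmiowb();
	release_ticket(lock);	/* hypothetical helper, not the real code */
}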
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MMIOWB_H
#define _ASM_IA64_MMIOWB_H

#include <asm/machvec.h>

/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes. This will make sure that writes
 * following the barrier will arrive after all previous writes. For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}

#define __ia64_mmiowb	___ia64_mmiowb
#define mmiowb()	platform_mmiowb()

#include <asm-generic/mmiowb.h>

#endif	/* _ASM_IA64_MMIOWB_H */