Merge tag 'arm64-mmiowb' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull mmiowb removal from Will Deacon: "Remove Mysterious Macro Intended to Obscure Weird Behaviours (mmiowb()) Remove mmiowb() from the kernel memory barrier API and instead, for architectures that need it, hide the barrier inside spin_unlock() when MMIO has been performed inside the critical section. The only relatively recent changes have been addressing review comments on the documentation, which is in a much better shape thanks to the efforts of Ben and Ingo. I was initially planning to split this into two pull requests so that you could run the coccinelle script yourself, however it's been plain sailing in linux-next so I've just included the whole lot here to keep things simple" * tag 'arm64-mmiowb' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (23 commits) docs/memory-barriers.txt: Update I/O section to be clearer about CPU vs thread docs/memory-barriers.txt: Fix style, spacing and grammar in I/O section arch: Remove dummy mmiowb() definitions from arch code net/ethernet/silan/sc92031: Remove stale comment about mmiowb() i40iw: Redefine i40iw_mmiowb() to do nothing scsi/qla1280: Remove stale comment about mmiowb() drivers: Remove explicit invocations of mmiowb() drivers: Remove useless trailing comments from mmiowb() invocations Documentation: Kill all references to mmiowb() riscv/mmiowb: Hook up mmwiob() implementation to asm-generic code powerpc/mmiowb: Hook up mmwiob() implementation to asm-generic code ia64/mmiowb: Add unconditional mmiowb() to arch_spin_unlock() mips/mmiowb: Add unconditional mmiowb() to arch_spin_unlock() sh/mmiowb: Add unconditional mmiowb() to arch_spin_unlock() m68k/io: Remove useless definition of mmiowb() nds32/io: Remove useless definition of mmiowb() x86/io: Remove useless definition of mmiowb() arm64/io: Remove useless definition of mmiowb() ARM/io: Remove useless definition of mmiowb() mmiowb: Hook up mmiowb helpers to spinlocks and generic I/O accessors ...
This commit is contained in:
@@ -22,6 +22,13 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -111,6 +111,7 @@ void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 	debug_spin_lock_after(lock);
 }
 
@@ -118,8 +119,10 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&lock->raw_lock);
 
-	if (ret)
+	if (ret) {
+		mmiowb_spin_lock();
 		debug_spin_lock_after(lock);
+	}
 #ifndef CONFIG_SMP
 	/*
 	 * Must not happen on UP:
@@ -131,6 +134,7 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 
 void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
+	mmiowb_spin_unlock();
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
 }
 
Reference in New Issue
Block a user