locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
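The change is purely mechanical: every arch-level __raw_spin_* entry point becomes arch_spin_*, while the lock type (arch_spinlock_t, renamed in an earlier patch of this series) is unchanged. A minimal before/after sketch of the resulting arch contract — illustration only, not part of the patch; the __ARCH_SPIN_LOCK_UNLOCKED initializer name assumes the companion initializer rename from the same series:

	/* Illustration only, not part of the patch. */
	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

	arch_spin_lock(&lock);		/* was __raw_spin_lock(&lock)   */
	/* ... critical section ... */
	arch_spin_unlock(&lock);	/* was __raw_spin_unlock(&lock) */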
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
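The PVOP_CALL1()/PVOP_VCALL1() wrappers in the hunk above dispatch each operation through the pv_lock_ops table, so a hypervisor (e.g. Xen) can substitute paravirtualized spinlock implementations at boot. For orientation, a rough sketch of that ops table in this era of the tree — field order and the exact header location are not guaranteed by this patch:

	struct pv_lock_ops {
		int  (*spin_is_locked)(struct arch_spinlock *lock);
		int  (*spin_is_contended)(struct arch_spinlock *lock);
		void (*spin_lock)(struct arch_spinlock *lock);
		void (*spin_lock_flags)(struct arch_spinlock *lock,
					unsigned long flags);
		int  (*spin_trylock)(struct arch_spinlock *lock);
		void (*spin_unlock)(struct arch_spinlock *lock);
	};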
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
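Without paravirt spinlocks, the arch_spin_*() helpers above are thin wrappers around the x86 ticket-lock primitives. A simplified, portable sketch of the ticket-lock idea behind __ticket_spin_lock()/__ticket_spin_unlock(), using GCC __atomic builtins in place of the real LOCK-prefixed inline assembly on a packed ticket word — illustration only:

	struct ticket_lock {
		unsigned short next;	/* next ticket to hand out       */
		unsigned short owner;	/* ticket currently being served */
	};

	static void ticket_lock(struct ticket_lock *l)
	{
		/* Take a ticket, then spin until it is our turn. */
		unsigned short me = __atomic_fetch_add(&l->next, 1,
						       __ATOMIC_RELAXED);

		while (__atomic_load_n(&l->owner, __ATOMIC_ACQUIRE) != me)
			; /* cpu_relax() in the real code */
	}

	static void ticket_unlock(struct ticket_lock *l)
	{
		/* Hand the lock to the next waiter in FIFO order. */
		__atomic_store_n(&l->owner, l->owner + 1, __ATOMIC_RELEASE);
	}

FIFO ordering is the point of the ticket scheme: waiters acquire the lock in the order they took tickets, which avoids the starvation possible with a plain test-and-set spinlock.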
@@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
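The renamed arch_*_relax() hooks keep their role as the spin-wait hint consumed by the generic contended slow paths; on x86 each one reduces to cpu_relax() (the PAUSE instruction). A simplified sketch of the consuming pattern, not the actual kernel slow path (the real consumer is the BUILD_LOCK_OPS() loop in kernel/spinlock.c of this era):

	/* Simplified sketch of a contended slow path. */
	static void example_lock_slowpath(arch_spinlock_t *lock)
	{
		while (!arch_spin_trylock(lock))
			arch_spin_relax(lock);	/* cpu_relax() on x86 */
	}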