/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
        __asm__ __volatile__("cas.l %1,%0,@r0"
                : "+r"(new)
                : "r"(old), "z"(p)
                : "t", "memory" );
        return new;
}
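
/*
 * Illustrative sketch, not part of this header upstream: cas.l compares
 * the word at @r0 against 'old' and stores 'new' on a match; either way
 * the previous memory value comes back in 'new', so the swap succeeded
 * iff the return value equals 'old'.  The hypothetical helper below
 * shows the usual retry loop built on __sl_cas(), here an atomic
 * increment.
 */
static inline void example_cas_inc(volatile unsigned *p)
{
        unsigned old;
        do old = *p;                            /* snapshot current value */
        while (__sl_cas(p, old, old + 1) != old);  /* retry if we raced */
}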

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)          ((x)->lock <= 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __sl_cas(&lock->lock, 1, 0);
}
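
/*
 * Illustrative sketch, not part of this header upstream: 1 means
 * unlocked and 0 locked, hence the arch_spin_is_locked() test above.
 * The hypothetical helper below only demonstrates the lock/unlock
 * pairing; real callers go through the generic spin_lock()/spin_unlock()
 * wrappers instead.
 */
static inline void example_spin_inc(arch_spinlock_t *lock, unsigned *counter)
{
        arch_spin_lock(lock);           /* spin until we swap 1 -> 0 */
        (*counter)++;                   /* critical section */
        arch_spin_unlock(lock);         /* store 1 back, releasing */
}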

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (old && __sl_cas(&rw->lock, old, old-1) != old);
        return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
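
/*
 * Illustrative sketch, not part of this header upstream: rw->lock starts
 * at RW_LOCK_BIAS and counts free reader slots.  Each reader takes one
 * slot by decrementing it, and a writer atomically swaps the full bias
 * for 0, so it only wins when there are no readers and no other writer.
 * A hypothetical read-side critical section:
 */
static inline unsigned example_read_word(arch_rwlock_t *rw, unsigned *val)
{
        unsigned v;

        arch_read_lock(rw);     /* claim one reader slot */
        v = *val;               /* readers may run concurrently */
        arch_read_unlock(rw);   /* give the slot back */
        return v;
}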

#endif /* __ASM_SH_SPINLOCK_CAS_H */