spinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
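
/*
 * PA-RISC has no compare-and-swap; the only atomic primitive is LDCW
 * (load and clear word), which atomically reads a word and writes zero
 * back.  The convention is therefore inverted relative to most
 * architectures: a non-zero lock word means "free", zero means "held".
 * __ldcw_align() picks a suitably aligned word inside arch_spinlock_t,
 * since LDCW (without the PA 2.0 coherent hint) requires a 16-byte
 * aligned operand.
 */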
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);

        return READ_ONCE(*a) == 0;
}
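
/*
 * Classic test-and-test-and-set: retry the LDCW only after an ordinary
 * read has seen the word go non-zero again, so a contended lock spins
 * on a shared cache line instead of hammering it with atomic writes.
 */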
static inline void arch_spin_lock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0)
                        continue;
}
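
/*
 * Releasing the lock is a single store of 1.  With a zero displacement,
 * "stw,ma" encodes the same instruction as the PA 2.0 ordered store
 * "stw,o", so the release is not reordered before accesses made inside
 * the critical section; pre-2.0 CPUs treat it as a plain store.
 */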
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        /* Release with ordered store. */
        __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
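
/*
 * Returns non-zero if the lock was obtained: a successful LDCW reads the
 * "free" value and leaves zero behind, marking the lock as held.
 */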
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        return __ldcw(a) != 0;
}
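
/*
 * Minimal usage sketch (illustrative only; real callers go through the
 * generic spin_lock()/spin_unlock() wrappers rather than these arch_
 * hooks, and the initializer name below is assumed from spinlock_types.h):
 *
 *      arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *      arch_spin_lock(&lock);
 *      ... critical section ...
 *      arch_spin_unlock(&lock);
 *
 *      if (arch_spin_trylock(&lock)) {
 *              ... got it without spinning ...
 *              arch_spin_unlock(&lock);
 *      }
 */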
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Locking is unfair: writers can be starved indefinitely by readers.
 *
 * The rwlock state itself is held in @counter and access to it is
 * serialized with @lock_mutex.
 */
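
/*
 * Counter walk-through (assuming __ARCH_RW_LOCK_UNLOCKED__ is the fully
 * unlocked value from spinlock_types.h):
 *
 *      counter == __ARCH_RW_LOCK_UNLOCKED__      unlocked
 *      counter == __ARCH_RW_LOCK_UNLOCKED__ - n  held by n readers
 *      counter == 0                              held by one writer
 *
 * Readers decrement on acquire and increment on release; a writer only
 * succeeds when the counter is back at the unlocked value and then drops
 * it to zero.  Interrupts are disabled around every counter update so
 * that an interrupt handler taking the same rwlock on this CPU cannot
 * deadlock against the briefly held @lock_mutex.
 */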
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * Zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader.
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If any reader holds the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer; otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers
         * (they can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}
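
/*
 * The blocking variants simply retry the trylock with cpu_relax() in
 * between; interrupts stay enabled while spinning, since they are only
 * disabled inside the trylock itself.
 */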
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}
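
/*
 * The unlock paths also take @lock_mutex: the counter update is a plain
 * read-modify-write and must not race with a concurrent trylock.
 */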
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}
#endif /* __ASM_SPINLOCK_H */