spinlock.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP
 */
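
/*
 * Lock word encoding used by the rwlock routines below: 0 means unlocked,
 * a positive value is the number of active readers, and -1 means the lock
 * is held by a writer.
 */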

/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative, a writer holds the lock; go back
 *   and try again
 * - if the conditional store fails, another CPU raced us; go back and try
 *   again
 * - successful store of the new (positive) lock value -> lock acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1: R6 = memw_locked(%0);\n"
                " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
                " { if (!P3) jump 1b; }\n"
                " memw_locked(%0,P3) = R6;\n"
                " { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}
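
/* Release a read lock: atomically decrement the reader count. */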
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1: R6 = memw_locked(%0);\n"
                " R6 = add(R6,#-1);\n"
                " memw_locked(%0,P3) = R6\n"
                " if (!P3) jump 1b;\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}

/* Returns 0 on failure, nonzero on success. */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        int temp;

        __asm__ __volatile__(
                " R6 = memw_locked(%1);\n"
                " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
                " { if (!P3) jump 1f; }\n"
                " memw_locked(%1,P3) = R6;\n"
                " { %0 = P3 }\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}

/* Write lock: wait for the lock word to become 0, then store -1 into it. */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1: R6 = memw_locked(%0)\n"
                " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
                " { if (!P3) jump 1b; }\n"
                " memw_locked(%0,P3) = R6;\n"
                " { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}
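
/* One attempt at the write lock; returns 0 on failure, nonzero on success. */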
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        int temp;

        __asm__ __volatile__(
                " R6 = memw_locked(%1)\n"
                " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
                " { if (!P3) jump 1f; }\n"
                " memw_locked(%1,P3) = R6;\n"
                " %0 = P3;\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}
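
/*
 * Release the write lock: smp_mb() orders the critical section before the
 * plain store that clears the lock word.
 */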
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
        smp_mb();
        lock->lock = 0;
}
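
/*
 * Spin until the lock word reads 0, then claim it by storing 1 through the
 * locked load/store-conditional pair; retry if the conditional store fails.
 */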
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __asm__ __volatile__(
                "1: R6 = memw_locked(%0);\n"
                " P3 = cmp.eq(R6,#0);\n"
                " { if (!P3) jump 1b; R6 = #1; }\n"
                " memw_locked(%0,P3) = R6;\n"
                " { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}
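
/* Release the spinlock: full barrier, then a plain store of 0. */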
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();
        lock->lock = 0;
}
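
/* One acquisition attempt, no spinning; returns 0 on failure, nonzero on success. */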
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
        int temp;

        __asm__ __volatile__(
                " R6 = memw_locked(%1);\n"
                " P3 = cmp.eq(R6,#0);\n"
                " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
                " memw_locked(%1,P3) = R6;\n"
                " %0 = P3;\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock
 */
#define arch_spin_is_locked(x) ((x)->lock != 0)

#endif /* _ASM_SPINLOCK_H */
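
For readers unfamiliar with Hexagon's memw_locked load-locked / store-conditional pair, the following is a minimal portable C11 sketch of the same rwlock state machine (0 = free, positive = reader count, -1 = write-held). It is an illustration only, not the kernel's implementation: the rw_demo_* names are invented here, and a compare-and-swap retry loop stands in for the LL/SC pair.

#include <stdatomic.h>

typedef struct {
        atomic_int lock;        /* 0 = free, >0 = readers, -1 = writer */
} rw_demo_t;

static void rw_demo_read_lock(rw_demo_t *rw)
{
        for (;;) {
                int old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

                if (old < 0)
                        continue;       /* a writer holds the lock: spin */
                /* store the incremented reader count -> lock acquired */
                if (atomic_compare_exchange_weak_explicit(&rw->lock, &old, old + 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return;
        }
}

static void rw_demo_read_unlock(rw_demo_t *rw)
{
        /* drop one reader; release ordering stands in for the LL/SC loop */
        atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}

static void rw_demo_write_lock(rw_demo_t *rw)
{
        for (;;) {
                int expected = 0;       /* only a completely free lock will do */

                if (atomic_compare_exchange_weak_explicit(&rw->lock, &expected, -1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return;
        }
}

static void rw_demo_write_unlock(rw_demo_t *rw)
{
        atomic_store_explicit(&rw->lock, 0, memory_order_release);
}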