/* lib/lockref.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/export.h>
  3. #include <linux/lockref.h>
#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * CMPXCHG_LOOP(CODE, SUCCESS) is the lockless fast path shared by the
 * lockref operations below.  It snapshots the combined lock+count word
 * (asserted to be 64 bits wide) and, as long as the embedded spinlock is
 * observed unlocked, applies CODE to a local copy ("new") and attempts to
 * publish it with a single 64-bit cmpxchg.  On success SUCCESS runs
 * (normally a "return" from the enclosing function); on failure "old" has
 * been reloaded with the current value and the loop retries.  It gives up
 * after 100 failed attempts, or as soon as the lock is seen held, so the
 * caller falls through to its spinlock-protected slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old;					\
		CODE								\
		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
						 &old.lock_count,		\
						 new.lock_count))) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
	}									\
} while (0)

#else

/* Spinlock-only configuration: the lockless fast path compiles away. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Lockless attempt: bump the count while the lock is unlocked. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Contended (or !USE_CMPXCHG_LOCKREF): fall back to the spinlock. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
  48. /**
  49. * lockref_get_not_zero - Increments count unless the count is 0 or dead
  50. * @lockref: pointer to lockref structure
  51. * Return: 1 if count updated successfully or 0 if count was zero
  52. */
  53. int lockref_get_not_zero(struct lockref *lockref)
  54. {
  55. int retval;
  56. CMPXCHG_LOOP(
  57. new.count++;
  58. if (old.count <= 0)
  59. return 0;
  60. ,
  61. return 1;
  62. );
  63. spin_lock(&lockref->lock);
  64. retval = 0;
  65. if (lockref->count > 0) {
  66. lockref->count++;
  67. retval = 1;
  68. }
  69. spin_unlock(&lockref->lock);
  70. return retval;
  71. }
  72. EXPORT_SYMBOL(lockref_get_not_zero);
  73. /**
  74. * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
  75. * @lockref: pointer to lockref structure
  76. * Return: 1 if count updated successfully or 0 if count would become zero
  77. */
  78. int lockref_put_not_zero(struct lockref *lockref)
  79. {
  80. int retval;
  81. CMPXCHG_LOOP(
  82. new.count--;
  83. if (old.count <= 1)
  84. return 0;
  85. ,
  86. return 1;
  87. );
  88. spin_lock(&lockref->lock);
  89. retval = 0;
  90. if (lockref->count > 1) {
  91. lockref->count--;
  92. retval = 1;
  93. }
  94. spin_unlock(&lockref->lock);
  95. return retval;
  96. }
  97. EXPORT_SYMBOL(lockref_put_not_zero);
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	/*
	 * Lockless only — unlike the other helpers there is no spinlock
	 * fallback.  If the lock is held, the cmpxchg keeps failing, or
	 * the count was already zero/dead, report -1 and let the caller
	 * decide how to proceed.
	 */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
  117. /**
  118. * lockref_put_or_lock - decrements count unless count <= 1 before decrement
  119. * @lockref: pointer to lockref structure
  120. * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
  121. */
  122. int lockref_put_or_lock(struct lockref *lockref)
  123. {
  124. CMPXCHG_LOOP(
  125. new.count--;
  126. if (old.count <= 1)
  127. break;
  128. ,
  129. return 1;
  130. );
  131. spin_lock(&lockref->lock);
  132. if (lockref->count <= 1)
  133. return 0;
  134. lockref->count--;
  135. spin_unlock(&lockref->lock);
  136. return 1;
  137. }
  138. EXPORT_SYMBOL(lockref_put_or_lock);
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * The caller must hold lockref->lock.  The large negative sentinel makes
 * every subsequent "count <= 0" / "count < 0" check in the helpers above
 * fail, so further get/put attempts on a dead lockref are rejected.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
  149. /**
  150. * lockref_get_not_dead - Increments count unless the ref is dead
  151. * @lockref: pointer to lockref structure
  152. * Return: 1 if count updated successfully or 0 if lockref was dead
  153. */
  154. int lockref_get_not_dead(struct lockref *lockref)
  155. {
  156. int retval;
  157. CMPXCHG_LOOP(
  158. new.count++;
  159. if (old.count < 0)
  160. return 0;
  161. ,
  162. return 1;
  163. );
  164. spin_lock(&lockref->lock);
  165. retval = 0;
  166. if (lockref->count >= 0) {
  167. lockref->count++;
  168. retval = 1;
  169. }
  170. spin_unlock(&lockref->lock);
  171. return retval;
  172. }
  173. EXPORT_SYMBOL(lockref_get_not_dead);