spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
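
/*
 * Initialize a raw spinlock for CONFIG_DEBUG_SPINLOCK: besides setting
 * the arch lock word to the unlocked state, reset the debug fields
 * (magic, owner, owner_cpu) that the checks below rely on.
 */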
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__raw_spin_lock_init);
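
/*
 * Note: on PREEMPT_RT kernels rwlock_t is a sleeping lock, so the
 * rwlock debug code below is compiled out there.
 */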
#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
#endif
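
/*
 * Report the state of a misbehaving spinlock: the CPU and task that
 * tripped a check, the lock's debug fields, and a stack trace.
 */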
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = READ_ONCE(lock->owner);

        if (owner == SPINLOCK_OWNER_INIT)
                owner = NULL;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, READ_ONCE(lock->magic),
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
                READ_ONCE(lock->owner_cpu));
        dump_stack();
}
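
/*
 * debug_locks_off() returns nonzero only the first time lock debugging
 * is turned off, so once a bug has been reported, later ones are
 * silently ignored.
 */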
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
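
/*
 * Pre-acquisition sanity checks: the magic value catches use of an
 * uninitialized or corrupted lock, and the owner/owner_cpu fields
 * catch a task or CPU trying to take a lock it already holds.
 */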
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
        SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
        WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
        WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
        WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We now rely on the NMI watchdog to detect lockups instead of doing
 * the detection here with an unfair lock, which can cause problems of
 * its own.
 */
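/*
 * mmiowb_spin_lock()/mmiowb_spin_unlock() track lock ownership for
 * architectures that must order MMIO writes made inside the critical
 * section before the lock release.
 */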
void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
        debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret) {
                mmiowb_spin_lock();
                debug_spin_lock_after(lock);
        }
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
        mmiowb_spin_unlock();
        debug_spin_unlock(lock);
        arch_spin_unlock(&lock->raw_lock);
}

#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                msg, raw_smp_processor_id(), current->comm,
                task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
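
/*
 * The read side does no owner tracking: an rwlock may be held by many
 * readers at once, so there is no single owner to record and only the
 * magic value can be checked.
 */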
void do_raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
        int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_unlock(&lock->raw_lock);
}
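
/*
 * The write side is exclusive, so it mirrors the spinlock checks:
 * bad magic, recursion by the owning task or CPU, and wrong-owner or
 * wrong-CPU unlock.
 */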
static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
        WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
        WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
        int ret = arch_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
}
#endif /* !CONFIG_PREEMPT_RT */
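
/*
 * Usage sketch (not part of this file): with CONFIG_DEBUG_SPINLOCK
 * enabled, the ordinary spinlock API routes through the do_raw_*()
 * hooks above, so callers need no changes. The lock name below is
 * hypothetical.
 *
 *      static DEFINE_SPINLOCK(demo_lock);
 *
 *      spin_lock(&demo_lock);     // -> raw_spin_lock() -> do_raw_spin_lock()
 *      ...                        // taking demo_lock again here would trip
 *                                 // the "recursion" SPIN_BUG_ON()
 *      spin_unlock(&demo_lock);   // -> do_raw_spin_unlock()
 */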