local_lock_internal.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>
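
/*
 * The wrappers in <linux/local_lock.h> (local_lock(), local_unlock(),
 * local_lock_irqsave(), ...) expand to the __local_*() macros defined
 * below.  A minimal usage sketch; struct foo_stats, foo and foo_inc()
 * are made-up names used purely for illustration:
 *
 *	struct foo_stats {
 *		local_lock_t	lock;
 *		unsigned long	count;
 *	};
 *
 *	static DEFINE_PER_CPU(struct foo_stats, foo) = {
 *		.lock = INIT_LOCAL_LOCK(lock),
 *	};
 *
 *	static void foo_inc(void)
 *	{
 *		local_lock(&foo.lock);
 *		this_cpu_inc(foo.count);
 *		local_unlock(&foo.lock);
 *	}
 */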

#ifndef CONFIG_PREEMPT_RT
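
/*
 * On !PREEMPT_RT a local_lock_t has no locking state of its own: the
 * protection comes from disabling preemption or interrupts in the macros
 * further down.  The struct only carries lockdep data when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled and is empty otherwise.
 */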
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,
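
/*
 * Debug helpers: with lockdep enabled, acquiring/releasing a local lock
 * feeds dep_map into lockdep and tracks the owning task, so recursive
 * acquisition or a release by a different task trips DEBUG_LOCKS_WARN_ON().
 */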
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
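
/*
 * Runtime initializer: registers one lockdep class per initialization site
 * (the static __key below) and resets the owner tracking.  Without lockdep
 * this is effectively a no-op.
 */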
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)
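
/*
 * Locking on !PREEMPT_RT: the per-CPU data is protected by disabling
 * preemption (plain variant) or interrupts (_irq/_irqsave variants) on the
 * local CPU; local_lock_acquire() merely adds the lockdep annotation and
 * owner tracking on top.
 */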
#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)
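
/*
 * Unlocking mirrors the above in reverse: drop the lockdep annotation and
 * owner first, then re-enable preemption or interrupts.
 */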
#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)

#else /* !CONFIG_PREEMPT_RT */

/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;

#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))

#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)

#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)
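
/*
 * The interrupt-disabling variants do not disable interrupts on PREEMPT_RT:
 * the per-CPU spinlock is a sleeping lock and already provides the required
 * exclusion, so they map to the plain operations and 'flags' is merely
 * type-checked and zeroed.
 */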
#define __local_lock_irq(lock)			__local_lock(lock)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)

#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)

#endif /* CONFIG_PREEMPT_RT */