seqlock.h

  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __LINUX_SEQLOCK_H
  3. #define __LINUX_SEQLOCK_H
  4. /*
  5. * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
  6. * lockless readers (read-only retry loops), and no writer starvation.
  7. *
  8. * See Documentation/locking/seqlock.rst
  9. *
  10. * Copyrights:
  11. * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
  12. * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
  13. */
  14. #include <linux/compiler.h>
  15. #include <linux/kcsan-checks.h>
  16. #include <linux/lockdep.h>
  17. #include <linux/mutex.h>
  18. #include <linux/preempt.h>
  19. #include <linux/spinlock.h>
  20. #include <asm/processor.h>
  21. /*
  22. * The seqlock seqcount_t interface does not prescribe a precise sequence of
  23. * read begin/retry/end. For readers, typically there is a call to
  24. * read_seqcount_begin() and read_seqcount_retry(); however, there are more
  25. * esoteric cases which do not follow this pattern.
  26. *
  27. * As a consequence, we take the following best-effort approach for raw usage
  28. * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
  29. * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
  30. * atomics; if there is a matching read_seqcount_retry() call, no following
  31. * memory operations are considered atomic. Usage of the seqlock_t interface
  32. * is not affected.
  33. */
  34. #define KCSAN_SEQLOCK_REGION_MAX 1000
  35. /*
  36. * Sequence counters (seqcount_t)
  37. *
  38. * This is the raw counting mechanism, without any writer protection.
  39. *
  40. * Write side critical sections must be serialized and non-preemptible.
  41. *
  42. * If readers can be invoked from hardirq or softirq contexts,
  43. * interrupts or bottom halves must also be respectively disabled before
  44. * entering the write section.
  45. *
  46. * This mechanism can't be used if the protected data contains pointers,
  47. * as the writer can invalidate a pointer that a reader is following.
  48. *
  49. * If the write serialization mechanism is one of the common kernel
  50. * locking primitives, use a sequence counter with associated lock
  51. * (seqcount_LOCKNAME_t) instead.
  52. *
  53. * If it's desired to automatically handle the sequence counter writer
  54. * serialization and non-preemptibility requirements, use a sequential
  55. * lock (seqlock_t) instead.
  56. *
  57. * See Documentation/locking/seqlock.rst
  58. */
  59. typedef struct seqcount {
  60. unsigned sequence;
  61. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  62. struct lockdep_map dep_map;
  63. #endif
  64. } seqcount_t;
  65. static inline void __seqcount_init(seqcount_t *s, const char *name,
  66. struct lock_class_key *key)
  67. {
  68. /*
  69. * Make sure we are not reinitializing a held lock:
  70. */
  71. lockdep_init_map(&s->dep_map, name, key, 0);
  72. s->sequence = 0;
  73. }
  74. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  75. # define SEQCOUNT_DEP_MAP_INIT(lockname) \
  76. .dep_map = { .name = #lockname }
  77. /**
  78. * seqcount_init() - runtime initializer for seqcount_t
  79. * @s: Pointer to the seqcount_t instance
  80. */
  81. # define seqcount_init(s) \
  82. do { \
  83. static struct lock_class_key __key; \
  84. __seqcount_init((s), #s, &__key); \
  85. } while (0)
  86. static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  87. {
  88. seqcount_t *l = (seqcount_t *)s;
  89. unsigned long flags;
  90. local_irq_save(flags);
  91. seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
  92. seqcount_release(&l->dep_map, _RET_IP_);
  93. local_irq_restore(flags);
  94. }
  95. #else
  96. # define SEQCOUNT_DEP_MAP_INIT(lockname)
  97. # define seqcount_init(s) __seqcount_init(s, NULL, NULL)
  98. # define seqcount_lockdep_reader_access(x)
  99. #endif
  100. /**
  101. * SEQCNT_ZERO() - static initializer for seqcount_t
  102. * @name: Name of the seqcount_t instance
  103. */
  104. #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
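As a minimal usage sketch of the raw interface above (demo_seq, demo_val and the demo_*() functions are invented for illustration; the writer's serialization is assumed to be provided by its caller):

    #include <linux/seqlock.h>

    static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
    static u64 demo_val;

    /* Writer: callers are assumed to already serialize against each other. */
    static void demo_update(u64 v)
    {
            preempt_disable();              /* write sections must be non-preemptible */
            write_seqcount_begin(&demo_seq);
            demo_val = v;
            write_seqcount_end(&demo_seq);
            preempt_enable();
    }

    /* Lockless reader: loops until it observes a stable, even sequence. */
    static u64 demo_read(void)
    {
            unsigned seq;
            u64 v;

            do {
                    seq = read_seqcount_begin(&demo_seq);
                    v = demo_val;
            } while (read_seqcount_retry(&demo_seq, seq));

            return v;
    }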
  105. /*
  106. * Sequence counters with associated locks (seqcount_LOCKNAME_t)
  107. *
  108. * A sequence counter which associates the lock used for writer
  109. * serialization at initialization time. This enables lockdep to validate
  110. * that the write side critical section is properly serialized.
  111. *
  112. * For associated locks which do not implicitly disable preemption,
  113. * preemption protection is enforced in the write side function.
  114. *
  115. * Lockdep is never used in any of the raw write variants.
  116. *
  117. * See Documentation/locking/seqlock.rst
  118. */
  119. /*
  120. * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
  121. * disable preemption. It can lead to higher latencies, and the write side
  122. * sections will not be able to acquire locks which become sleeping locks
  123. * (e.g. spinlock_t).
  124. *
  125. * To remain preemptible while avoiding a possible livelock caused by the
  126. * reader preempting the writer, use a different technique: let the reader
  127. * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
  128. * case, acquire then release the associated LOCKNAME writer serialization
  129. * lock. This will allow any possibly-preempted writer to make progress
  130. * until the end of its writer serialization lock critical section.
  131. *
  132. * This lock-unlock technique must be implemented for all PREEMPT_RT
  133. * sleeping locks. See Documentation/locking/locktypes.rst
  134. */
  135. #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
  136. #define __SEQ_LOCK(expr) expr
  137. #else
  138. #define __SEQ_LOCK(expr)
  139. #endif
  140. /*
  141. * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
  142. * @seqcount: The real sequence counter
  143. * @lock: Pointer to the associated lock
  144. *
  145. * A plain sequence counter with external writer synchronization by
  146. * LOCKNAME @lock. The lock is associated to the sequence counter in the
  147. * static initializer or init function. This enables lockdep to validate
  148. * that the write side critical section is properly serialized.
  149. *
  150. * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
  151. */
  152. /*
  153. * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
  154. * @s: Pointer to the seqcount_LOCKNAME_t instance
  155. * @lock: Pointer to the associated lock
  156. */
  157. #define seqcount_LOCKNAME_init(s, _lock, lockname) \
  158. do { \
  159. seqcount_##lockname##_t *____s = (s); \
  160. seqcount_init(&____s->seqcount); \
  161. __SEQ_LOCK(____s->lock = (_lock)); \
  162. } while (0)
  163. #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
  164. #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
  165. #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
  166. #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
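As a sketch of how a counter gets associated with its write-serialization lock at init time (struct demo_stats and its helpers are hypothetical):

    #include <linux/seqlock.h>
    #include <linux/spinlock.h>

    struct demo_stats {
            spinlock_t              lock;   /* writer serialization */
            seqcount_spinlock_t     seq;    /* associated with ->lock below */
            u64                     packets;
            u64                     bytes;
    };

    static void demo_stats_init(struct demo_stats *st)
    {
            spin_lock_init(&st->lock);
            seqcount_spinlock_init(&st->seq, &st->lock);
            st->packets = 0;
            st->bytes = 0;
    }

    static void demo_stats_add(struct demo_stats *st, u64 bytes)
    {
            spin_lock(&st->lock);
            /* With lockdep enabled, write_seqcount_begin() can assert ->lock is held. */
            write_seqcount_begin(&st->seq);
            st->packets++;
            st->bytes += bytes;
            write_seqcount_end(&st->seq);
            spin_unlock(&st->lock);
    }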
  167. /*
  168. * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
  169. * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
  170. *
  171. * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
  172. * @locktype: LOCKNAME canonical C data type
  173. * @preemptible: preemptibility of above locktype
  174. * @lockmember: argument for lockdep_assert_held()
  175. * @lockbase: associated lock release function (prefix only)
  176. * @lock_acquire: associated lock acquisition function (full call)
  177. */
  178. #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
  179. typedef struct seqcount_##lockname { \
  180. seqcount_t seqcount; \
  181. __SEQ_LOCK(locktype *lock); \
  182. } seqcount_##lockname##_t; \
  183. \
  184. static __always_inline seqcount_t * \
  185. __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
  186. { \
  187. return &s->seqcount; \
  188. } \
  189. \
  190. static __always_inline unsigned \
  191. __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
  192. { \
  193. unsigned seq = READ_ONCE(s->seqcount.sequence); \
  194. \
  195. if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
  196. return seq; \
  197. \
  198. if (preemptible && unlikely(seq & 1)) { \
  199. __SEQ_LOCK(lock_acquire); \
  200. __SEQ_LOCK(lockbase##_unlock(s->lock)); \
  201. \
  202. /* \
  203. * Re-read the sequence counter since the (possibly \
  204. * preempted) writer made progress. \
  205. */ \
  206. seq = READ_ONCE(s->seqcount.sequence); \
  207. } \
  208. \
  209. return seq; \
  210. } \
  211. \
  212. static __always_inline bool \
  213. __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
  214. { \
  215. if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
  216. return preemptible; \
  217. \
  218. /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
  219. return false; \
  220. } \
  221. \
  222. static __always_inline void \
  223. __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
  224. { \
  225. __SEQ_LOCK(lockdep_assert_held(lockmember)); \
  226. }
  227. /*
  228. * __seqprop() for seqcount_t
  229. */
  230. static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
  231. {
  232. return s;
  233. }
  234. static inline unsigned __seqprop_sequence(const seqcount_t *s)
  235. {
  236. return READ_ONCE(s->sequence);
  237. }
  238. static inline bool __seqprop_preemptible(const seqcount_t *s)
  239. {
  240. return false;
  241. }
  242. static inline void __seqprop_assert(const seqcount_t *s)
  243. {
  244. lockdep_assert_preemption_disabled();
  245. }
  246. #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
  247. SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
  248. SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
  249. SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
  250. SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
  251. /*
  252. * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
  253. * @name: Name of the seqcount_LOCKNAME_t instance
  254. * @lock: Pointer to the associated LOCKNAME
  255. */
  256. #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
  257. .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
  258. __SEQ_LOCK(.lock = (assoc_lock)) \
  259. }
  260. #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
  261. #define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
  262. #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
  263. #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
  264. #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
  265. #define __seqprop_case(s, lockname, prop) \
  266. seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
  267. #define __seqprop(s, prop) _Generic(*(s), \
  268. seqcount_t: __seqprop_##prop((void *)(s)), \
  269. __seqprop_case((s), raw_spinlock, prop), \
  270. __seqprop_case((s), spinlock, prop), \
  271. __seqprop_case((s), rwlock, prop), \
  272. __seqprop_case((s), mutex, prop))
  273. #define seqprop_ptr(s) __seqprop(s, ptr)
  274. #define seqprop_sequence(s) __seqprop(s, sequence)
  275. #define seqprop_preemptible(s) __seqprop(s, preemptible)
  276. #define seqprop_assert(s) __seqprop(s, assert)
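The _Generic() dispatch above is what allows one read/write API to accept either a plain seqcount_t or any seqcount_LOCKNAME_t. A small sketch (demo_lock, plain_seq and locked_seq are invented names):

    static DEFINE_SPINLOCK(demo_lock);
    static seqcount_t          plain_seq  = SEQCNT_ZERO(plain_seq);
    static seqcount_spinlock_t locked_seq = SEQCNT_SPINLOCK_ZERO(locked_seq, &demo_lock);

    static void demo_dispatch(void)
    {
            /* Same macro, two counter types: __seqprop() picks the right accessor. */
            unsigned s1 = raw_read_seqcount_begin(&plain_seq);
            unsigned s2 = raw_read_seqcount_begin(&locked_seq);

            (void)s1;
            (void)s2;
    }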
  277. /**
  278. * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
  279. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  280. *
  281. * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
  282. * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
  283. * provided before actually loading any of the variables that are to be
  284. * protected in this critical section.
  285. *
  286. * Use carefully, only in critical code, and comment how the barrier is
  287. * provided.
  288. *
  289. * Return: count to be passed to read_seqcount_retry()
  290. */
  291. #define __read_seqcount_begin(s) \
  292. ({ \
  293. unsigned __seq; \
  294. \
  295. while ((__seq = seqprop_sequence(s)) & 1) \
  296. cpu_relax(); \
  297. \
  298. kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
  299. __seq; \
  300. })
  301. /**
  302. * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
  303. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  304. *
  305. * Return: count to be passed to read_seqcount_retry()
  306. */
  307. #define raw_read_seqcount_begin(s) \
  308. ({ \
  309. unsigned _seq = __read_seqcount_begin(s); \
  310. \
  311. smp_rmb(); \
  312. _seq; \
  313. })
  314. /**
  315. * read_seqcount_begin() - begin a seqcount_t read critical section
  316. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  317. *
  318. * Return: count to be passed to read_seqcount_retry()
  319. */
  320. #define read_seqcount_begin(s) \
  321. ({ \
  322. seqcount_lockdep_reader_access(seqprop_ptr(s)); \
  323. raw_read_seqcount_begin(s); \
  324. })
  325. /**
  326. * raw_read_seqcount() - read the raw seqcount_t counter value
  327. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  328. *
  329. * raw_read_seqcount opens a read critical section of the given
  330. * seqcount_t, without any lockdep checking, and without checking or
  331. * masking the sequence counter LSB. Calling code is responsible for
  332. * handling that.
  333. *
  334. * Return: count to be passed to read_seqcount_retry()
  335. */
  336. #define raw_read_seqcount(s) \
  337. ({ \
  338. unsigned __seq = seqprop_sequence(s); \
  339. \
  340. smp_rmb(); \
  341. kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
  342. __seq; \
  343. })
  344. /**
  345. * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
  346. * lockdep and w/o counter stabilization
  347. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  348. *
  349. * raw_seqcount_begin opens a read critical section of the given
  350. * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
  351. * for the count to stabilize. If a writer is active when it begins, it
  352. * will fail the read_seqcount_retry() at the end of the read critical
  353. * section instead of stabilizing at the beginning of it.
  354. *
  355. * Use this only in special kernel hot paths where the read section is
  356. * small and has a high probability of success through other external
  357. * means. It will save a single branching instruction.
  358. *
  359. * Return: count to be passed to read_seqcount_retry()
  360. */
  361. #define raw_seqcount_begin(s) \
  362. ({ \
  363. /* \
  364. * If the counter is odd, let read_seqcount_retry() fail \
  365. * by decrementing the counter. \
  366. */ \
  367. raw_read_seqcount(s) & ~1; \
  368. })
  369. /**
  370. * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
  371. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  372. * @start: count, from read_seqcount_begin()
  373. *
  374. * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
  375. * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
  376. * provided before actually loading any of the variables that are to be
  377. * protected in this critical section.
  378. *
  379. * Use carefully, only in critical code, and comment how the barrier is
  380. * provided.
  381. *
  382. * Return: true if a read section retry is required, else false
  383. */
  384. #define __read_seqcount_retry(s, start) \
  385. do___read_seqcount_retry(seqprop_ptr(s), start)
  386. static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
  387. {
  388. kcsan_atomic_next(0);
  389. return unlikely(READ_ONCE(s->sequence) != start);
  390. }
  391. /**
  392. * read_seqcount_retry() - end a seqcount_t read critical section
  393. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  394. * @start: count, from read_seqcount_begin()
  395. *
  396. * read_seqcount_retry closes the read critical section of given
  397. * seqcount_t. If the critical section was invalid, it must be ignored
  398. * (and typically retried).
  399. *
  400. * Return: true if a read section retry is required, else false
  401. */
  402. #define read_seqcount_retry(s, start) \
  403. do_read_seqcount_retry(seqprop_ptr(s), start)
  404. static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
  405. {
  406. smp_rmb();
  407. return do___read_seqcount_retry(s, start);
  408. }
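To make the "caller provides the barrier" contract of the __ variants concrete, here is a sketch that supplies the smp_rmb() calls explicitly; it reuses the hypothetical demo_seq/demo_val from the earlier sketch and is, apart from the missing lockdep annotation, equivalent to using read_seqcount_begin()/read_seqcount_retry(). Real users of the __ variants typically already have the required ordering from some other barrier or dependency:

    static u64 demo_read_explicit_barriers(void)
    {
            unsigned seq;
            u64 v;

            do {
                    seq = __read_seqcount_begin(&demo_seq);
                    smp_rmb();      /* order the counter load before the data loads */
                    v = demo_val;
                    smp_rmb();      /* order the data loads before the re-check */
            } while (__read_seqcount_retry(&demo_seq, seq));

            return v;
    }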
  409. /**
  410. * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
  411. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  412. *
  413. * Context: check write_seqcount_begin()
  414. */
  415. #define raw_write_seqcount_begin(s) \
  416. do { \
  417. if (seqprop_preemptible(s)) \
  418. preempt_disable(); \
  419. \
  420. do_raw_write_seqcount_begin(seqprop_ptr(s)); \
  421. } while (0)
  422. static inline void do_raw_write_seqcount_begin(seqcount_t *s)
  423. {
  424. kcsan_nestable_atomic_begin();
  425. s->sequence++;
  426. smp_wmb();
  427. }
  428. /**
  429. * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
  430. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  431. *
  432. * Context: check write_seqcount_end()
  433. */
  434. #define raw_write_seqcount_end(s) \
  435. do { \
  436. do_raw_write_seqcount_end(seqprop_ptr(s)); \
  437. \
  438. if (seqprop_preemptible(s)) \
  439. preempt_enable(); \
  440. } while (0)
  441. static inline void do_raw_write_seqcount_end(seqcount_t *s)
  442. {
  443. smp_wmb();
  444. s->sequence++;
  445. kcsan_nestable_atomic_end();
  446. }
  447. /**
  448. * write_seqcount_begin_nested() - start a seqcount_t write section with
  449. * custom lockdep nesting level
  450. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  451. * @subclass: lockdep nesting level
  452. *
  453. * See Documentation/locking/lockdep-design.rst
  454. * Context: check write_seqcount_begin()
  455. */
  456. #define write_seqcount_begin_nested(s, subclass) \
  457. do { \
  458. seqprop_assert(s); \
  459. \
  460. if (seqprop_preemptible(s)) \
  461. preempt_disable(); \
  462. \
  463. do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
  464. } while (0)
  465. static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
  466. {
  467. seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
  468. do_raw_write_seqcount_begin(s);
  469. }
  470. /**
  471. * write_seqcount_begin() - start a seqcount_t write side critical section
  472. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  473. *
  474. * Context: sequence counter write side sections must be serialized and
  475. * non-preemptible. Preemption will be automatically disabled if and
  476. * only if the seqcount write serialization lock is associated, and
  477. * preemptible. If readers can be invoked from hardirq or softirq
  478. * context, interrupts or bottom halves must be respectively disabled.
  479. */
  480. #define write_seqcount_begin(s) \
  481. do { \
  482. seqprop_assert(s); \
  483. \
  484. if (seqprop_preemptible(s)) \
  485. preempt_disable(); \
  486. \
  487. do_write_seqcount_begin(seqprop_ptr(s)); \
  488. } while (0)
  489. static inline void do_write_seqcount_begin(seqcount_t *s)
  490. {
  491. do_write_seqcount_begin_nested(s, 0);
  492. }
  493. /**
  494. * write_seqcount_end() - end a seqcount_t write side critical section
  495. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  496. *
  497. * Context: Preemption will be automatically re-enabled if and only if
  498. * the seqcount write serialization lock is associated, and preemptible.
  499. */
  500. #define write_seqcount_end(s) \
  501. do { \
  502. do_write_seqcount_end(seqprop_ptr(s)); \
  503. \
  504. if (seqprop_preemptible(s)) \
  505. preempt_enable(); \
  506. } while (0)
  507. static inline void do_write_seqcount_end(seqcount_t *s)
  508. {
  509. seqcount_release(&s->dep_map, _RET_IP_);
  510. do_raw_write_seqcount_end(s);
  511. }
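A sketch of the automatic preemption handling described above, with a counter associated to a preemptible lock (struct demo_cfg and its helpers are invented):

    struct demo_cfg {
            struct mutex            lock;
            seqcount_mutex_t        seq;
            u32                     a, b;
    };

    static void demo_cfg_init(struct demo_cfg *cfg)
    {
            mutex_init(&cfg->lock);
            seqcount_mutex_init(&cfg->seq, &cfg->lock);
            cfg->a = cfg->b = 0;
    }

    static void demo_cfg_update(struct demo_cfg *cfg, u32 a, u32 b)
    {
            mutex_lock(&cfg->lock);
            /*
             * The mutex is preemptible, so write_seqcount_begin() disables
             * preemption itself (on PREEMPT_RT it instead relies on the
             * reader-side lock+unlock described earlier).
             */
            write_seqcount_begin(&cfg->seq);
            cfg->a = a;
            cfg->b = b;
            write_seqcount_end(&cfg->seq);  /* re-enables preemption if it was disabled */
            mutex_unlock(&cfg->lock);
    }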
  512. /**
  513. * raw_write_seqcount_barrier() - do a seqcount_t write barrier
  514. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  515. *
  516. * This can be used to provide an ordering guarantee instead of the usual
  517. * consistency guarantee. It is one wmb cheaper, because it can collapse
  518. * the two back-to-back wmb()s.
  519. *
  520. * Note that writes surrounding the barrier should be declared atomic (e.g.
  521. * via WRITE_ONCE): a) to ensure the writes become visible to other threads
  522. * atomically, avoiding compiler optimizations; b) to document which writes are
  523. * meant to propagate to the reader critical section. This is necessary because
  524. * neither the writes before nor the writes after the barrier are enclosed in
  525. * a seq-writer critical section that would ensure readers are aware of ongoing writes::
  526. *
  527. * seqcount_t seq;
  528. * bool X = true, Y = false;
  529. *
  530. * void read(void)
  531. * {
  532. * bool x, y;
  533. *
  534. * do {
  535. * int s = read_seqcount_begin(&seq);
  536. *
  537. * x = X; y = Y;
  538. *
  539. * } while (read_seqcount_retry(&seq, s));
  540. *
  541. * BUG_ON(!x && !y);
  542. * }
  543. *
  544. * void write(void)
  545. * {
  546. * WRITE_ONCE(Y, true);
  547. *
  548. * raw_write_seqcount_barrier(&seq);
  549. *
  550. * WRITE_ONCE(X, false);
  551. * }
  552. */
  553. #define raw_write_seqcount_barrier(s) \
  554. do_raw_write_seqcount_barrier(seqprop_ptr(s))
  555. static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
  556. {
  557. kcsan_nestable_atomic_begin();
  558. s->sequence++;
  559. smp_wmb();
  560. s->sequence++;
  561. kcsan_nestable_atomic_end();
  562. }
  563. /**
  564. * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
  565. * side operations
  566. * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  567. *
  568. * After write_seqcount_invalidate, no seqcount_t read side operations
  569. * will complete successfully and see data older than this.
  570. */
  571. #define write_seqcount_invalidate(s) \
  572. do_write_seqcount_invalidate(seqprop_ptr(s))
  573. static inline void do_write_seqcount_invalidate(seqcount_t *s)
  574. {
  575. smp_wmb();
  576. kcsan_nestable_atomic_begin();
  577. s->sequence += 2;
  578. kcsan_nestable_atomic_end();
  579. }
  580. /*
  581. * Latch sequence counters (seqcount_latch_t)
  582. *
  583. * A sequence counter variant where the counter even/odd value is used to
  584. * switch between two copies of protected data. This allows the read path,
  585. * typically NMIs, to safely interrupt the write side critical section.
  586. *
  587. * As the write sections are fully preemptible, no special handling for
  588. * PREEMPT_RT is needed.
  589. */
  590. typedef struct {
  591. seqcount_t seqcount;
  592. } seqcount_latch_t;
  593. /**
  594. * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
  595. * @seq_name: Name of the seqcount_latch_t instance
  596. */
  597. #define SEQCNT_LATCH_ZERO(seq_name) { \
  598. .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
  599. }
  600. /**
  601. * seqcount_latch_init() - runtime initializer for seqcount_latch_t
  602. * @s: Pointer to the seqcount_latch_t instance
  603. */
  604. #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
  605. /**
  606. * raw_read_seqcount_latch() - pick even/odd latch data copy
  607. * @s: Pointer to seqcount_latch_t
  608. *
  609. * See raw_write_seqcount_latch() for details and a full reader/writer
  610. * usage example.
  611. *
  612. * Return: sequence counter raw value. Use the lowest bit as an index for
  613. * picking which data copy to read. The full counter must then be checked
  614. * with read_seqcount_latch_retry().
  615. */
  616. static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
  617. {
  618. /*
  619. * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
  620. * Due to the dependent load, a full smp_rmb() is not needed.
  621. */
  622. return READ_ONCE(s->seqcount.sequence);
  623. }
  624. /**
  625. * read_seqcount_latch_retry() - end a seqcount_latch_t read section
  626. * @s: Pointer to seqcount_latch_t
  627. * @start: count, from raw_read_seqcount_latch()
  628. *
  629. * Return: true if a read section retry is required, else false
  630. */
  631. static inline int
  632. read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  633. {
  634. return read_seqcount_retry(&s->seqcount, start);
  635. }
  636. /**
  637. * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
  638. * @s: Pointer to seqcount_latch_t
  639. *
  640. * The latch technique is a multiversion concurrency control method that allows
  641. * queries during non-atomic modifications. If you can guarantee queries never
  642. * interrupt the modification -- e.g. the concurrency is strictly between CPUs
  643. * -- you most likely do not need this.
  644. *
  645. * Where the traditional RCU/lockless data structures rely on atomic
  646. * modifications to ensure queries observe either the old or the new state the
  647. * latch allows the same for non-atomic updates. The trade-off is doubling the
  648. * cost of storage; we have to maintain two copies of the entire data
  649. * structure.
  650. *
  651. * Very simply put: we first modify one copy and then the other. This ensures
  652. * there is always one copy in a stable state, ready to give us an answer.
  653. *
  654. * The basic form is a data structure like::
  655. *
  656. * struct latch_struct {
  657. * seqcount_latch_t seq;
  658. * struct data_struct data[2];
  659. * };
  660. *
  661. * Where a modification, which is assumed to be externally serialized, does the
  662. * following::
  663. *
  664. * void latch_modify(struct latch_struct *latch, ...)
  665. * {
  666. * smp_wmb(); // Ensure that the last data[1] update is visible
  667. * latch->seq.sequence++;
  668. * smp_wmb(); // Ensure that the seqcount update is visible
  669. *
  670. * modify(latch->data[0], ...);
  671. *
  672. * smp_wmb(); // Ensure that the data[0] update is visible
  673. * latch->seq.sequence++;
  674. * smp_wmb(); // Ensure that the seqcount update is visible
  675. *
  676. * modify(latch->data[1], ...);
  677. * }
  678. *
  679. * The query will have a form like::
  680. *
  681. * struct entry *latch_query(struct latch_struct *latch, ...)
  682. * {
  683. * struct entry *entry;
  684. * unsigned seq, idx;
  685. *
  686. * do {
  687. * seq = raw_read_seqcount_latch(&latch->seq);
  688. *
  689. * idx = seq & 0x01;
  690. * entry = data_query(latch->data[idx], ...);
  691. *
  692. * // This includes needed smp_rmb()
  693. * } while (read_seqcount_latch_retry(&latch->seq, seq));
  694. *
  695. * return entry;
  696. * }
  697. *
  698. * So during the modification, queries are first redirected to data[1]. Then we
  699. * modify data[0]. When that is complete, we redirect queries back to data[0]
  700. * and we can modify data[1].
  701. *
  702. * NOTE:
  703. *
  704. * The non-requirement for atomic modifications does _NOT_ include
  705. * the publishing of new entries in the case where data is a dynamic
  706. * data structure.
  707. *
  708. * An iteration might start in data[0] and get suspended long enough
  709. * to miss an entire modification sequence, once it resumes it might
  710. * observe the new entry.
  711. *
  712. * NOTE2:
  713. *
  714. * When data is a dynamic data structure; one should use regular RCU
  715. * patterns to manage the lifetimes of the objects within.
  716. */
  717. static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
  718. {
  719. smp_wmb(); /* prior stores before incrementing "sequence" */
  720. s->seqcount.sequence++;
  721. smp_wmb(); /* increment "sequence" before following stores */
  722. }
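A concrete sketch of the latch pattern using the helper above instead of the open-coded barriers from the comment (struct demo_latch and its functions are invented; the writer is assumed to be externally serialized):

    struct demo_latch {
            seqcount_latch_t        seq;
            struct demo_data {
                    u64     mult;
                    u64     shift;
            } data[2];
    };

    static void demo_latch_update(struct demo_latch *dl, u64 mult, u64 shift)
    {
            raw_write_seqcount_latch(&dl->seq);     /* readers now pick data[1] */
            dl->data[0].mult  = mult;
            dl->data[0].shift = shift;

            raw_write_seqcount_latch(&dl->seq);     /* readers now pick data[0] */
            dl->data[1].mult  = mult;
            dl->data[1].shift = shift;
    }

    /* Reader: may safely interrupt the writer, e.g. from NMI context. */
    static u64 demo_latch_read_mult(struct demo_latch *dl)
    {
            unsigned seq, idx;
            u64 mult;

            do {
                    seq  = raw_read_seqcount_latch(&dl->seq);
                    idx  = seq & 0x01;
                    mult = dl->data[idx].mult;
            } while (read_seqcount_latch_retry(&dl->seq, seq));

            return mult;
    }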
  723. /*
  724. * Sequential locks (seqlock_t)
  725. *
  726. * Sequence counters with an embedded spinlock for writer serialization
  727. * and non-preemptibility.
  728. *
  729. * For more info, see:
  730. * - Comments on top of seqcount_t
  731. * - Documentation/locking/seqlock.rst
  732. */
  733. typedef struct {
  734. /*
  735. * Make sure that readers don't starve writers on PREEMPT_RT: use
  736. * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
  737. */
  738. seqcount_spinlock_t seqcount;
  739. spinlock_t lock;
  740. } seqlock_t;
  741. #define __SEQLOCK_UNLOCKED(lockname) \
  742. { \
  743. .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
  744. .lock = __SPIN_LOCK_UNLOCKED(lockname) \
  745. }
  746. /**
  747. * seqlock_init() - dynamic initializer for seqlock_t
  748. * @sl: Pointer to the seqlock_t instance
  749. */
  750. #define seqlock_init(sl) \
  751. do { \
  752. spin_lock_init(&(sl)->lock); \
  753. seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
  754. } while (0)
  755. /**
  756. * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
  757. * @sl: Name of the seqlock_t instance
  758. */
  759. #define DEFINE_SEQLOCK(sl) \
  760. seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
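Both initialization styles, as a short sketch (demo_static_sl and struct demo_obj are invented and reused by the sketches further below):

    /* Static initialization. */
    static DEFINE_SEQLOCK(demo_static_sl);

    /* Dynamic initialization, e.g. for an embedded or allocated object. */
    struct demo_obj {
            seqlock_t       sl;
            u64             value;
    };

    static void demo_obj_init(struct demo_obj *obj)
    {
            seqlock_init(&obj->sl);
            obj->value = 0;
    }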
  761. /**
  762. * read_seqbegin() - start a seqlock_t read side critical section
  763. * @sl: Pointer to seqlock_t
  764. *
  765. * Return: count, to be passed to read_seqretry()
  766. */
  767. static inline unsigned read_seqbegin(const seqlock_t *sl)
  768. {
  769. unsigned ret = read_seqcount_begin(&sl->seqcount);
  770. kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
  771. kcsan_flat_atomic_begin();
  772. return ret;
  773. }
  774. /**
  775. * read_seqretry() - end a seqlock_t read side section
  776. * @sl: Pointer to seqlock_t
  777. * @start: count, from read_seqbegin()
  778. *
  779. * read_seqretry closes the read side critical section of given seqlock_t.
  780. * If the critical section was invalid, it must be ignored (and typically
  781. * retried).
  782. *
  783. * Return: true if a read section retry is required, else false
  784. */
  785. static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
  786. {
  787. /*
  788. * Assume not nested: read_seqretry() may be called multiple times when
  789. * completing a read critical section.
  790. */
  791. kcsan_flat_atomic_end();
  792. return read_seqcount_retry(&sl->seqcount, start);
  793. }
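Putting read_seqbegin() and read_seqretry() together, the usual lockless reader loop looks like this sketch (reusing the hypothetical struct demo_obj from above):

    static u64 demo_obj_read(const struct demo_obj *obj)
    {
            unsigned seq;
            u64 v;

            do {
                    seq = read_seqbegin(&obj->sl);
                    v = obj->value;
            } while (read_seqretry(&obj->sl, seq));

            return v;
    }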
  794. /*
  795. * For all seqlock_t write side functions, use the internal
  796. * do_write_seqcount_begin() instead of generic write_seqcount_begin().
  797. * This way, no redundant lockdep_assert_held() checks are added.
  798. */
  799. /**
  800. * write_seqlock() - start a seqlock_t write side critical section
  801. * @sl: Pointer to seqlock_t
  802. *
  803. * write_seqlock opens a write side critical section for the given
  804. * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
  805. * that sequential lock. All seqlock_t write side sections are thus
  806. * automatically serialized and non-preemptible.
  807. *
  808. * Context: if the seqlock_t read section, or other write side critical
  809. * sections, can be invoked from hardirq or softirq contexts, use the
  810. * _irqsave or _bh variants of this function instead.
  811. */
  812. static inline void write_seqlock(seqlock_t *sl)
  813. {
  814. spin_lock(&sl->lock);
  815. do_write_seqcount_begin(&sl->seqcount.seqcount);
  816. }
  817. /**
  818. * write_sequnlock() - end a seqlock_t write side critical section
  819. * @sl: Pointer to seqlock_t
  820. *
  821. * write_sequnlock closes the (serialized and non-preemptible) write side
  822. * critical section of given seqlock_t.
  823. */
  824. static inline void write_sequnlock(seqlock_t *sl)
  825. {
  826. do_write_seqcount_end(&sl->seqcount.seqcount);
  827. spin_unlock(&sl->lock);
  828. }
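And the matching write side, automatically serialized by the embedded spinlock (same hypothetical demo_obj):

    static void demo_obj_set(struct demo_obj *obj, u64 v)
    {
            write_seqlock(&obj->sl);        /* takes obj->sl.lock and bumps the sequence */
            obj->value = v;
            write_sequnlock(&obj->sl);
    }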
  829. /**
  830. * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
  831. * @sl: Pointer to seqlock_t
  832. *
  833. * _bh variant of write_seqlock(). Use only if the read side section, or
  834. * other write side sections, can be invoked from softirq contexts.
  835. */
  836. static inline void write_seqlock_bh(seqlock_t *sl)
  837. {
  838. spin_lock_bh(&sl->lock);
  839. do_write_seqcount_begin(&sl->seqcount.seqcount);
  840. }
  841. /**
  842. * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
  843. * @sl: Pointer to seqlock_t
  844. *
  845. * write_sequnlock_bh closes the serialized, non-preemptible, and
  846. * softirqs-disabled, seqlock_t write side critical section opened with
  847. * write_seqlock_bh().
  848. */
  849. static inline void write_sequnlock_bh(seqlock_t *sl)
  850. {
  851. do_write_seqcount_end(&sl->seqcount.seqcount);
  852. spin_unlock_bh(&sl->lock);
  853. }
  854. /**
  855. * write_seqlock_irq() - start a non-interruptible seqlock_t write section
  856. * @sl: Pointer to seqlock_t
  857. *
  858. * _irq variant of write_seqlock(). Use only if the read side section, or
  859. * other write sections, can be invoked from hardirq contexts.
  860. */
  861. static inline void write_seqlock_irq(seqlock_t *sl)
  862. {
  863. spin_lock_irq(&sl->lock);
  864. do_write_seqcount_begin(&sl->seqcount.seqcount);
  865. }
  866. /**
  867. * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
  868. * @sl: Pointer to seqlock_t
  869. *
  870. * write_sequnlock_irq closes the serialized and non-interruptible
  871. * seqlock_t write side section opened with write_seqlock_irq().
  872. */
  873. static inline void write_sequnlock_irq(seqlock_t *sl)
  874. {
  875. do_write_seqcount_end(&sl->seqcount.seqcount);
  876. spin_unlock_irq(&sl->lock);
  877. }
  878. static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
  879. {
  880. unsigned long flags;
  881. spin_lock_irqsave(&sl->lock, flags);
  882. do_write_seqcount_begin(&sl->seqcount.seqcount);
  883. return flags;
  884. }
  885. /**
  886. * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
  887. * section
  888. * @lock: Pointer to seqlock_t
  889. * @flags: Stack-allocated storage for saving caller's local interrupt
  890. * state, to be passed to write_sequnlock_irqrestore().
  891. *
  892. * _irqsave variant of write_seqlock(). Use it only if the read side
  893. * section, or other write sections, can be invoked from hardirq context.
  894. */
  895. #define write_seqlock_irqsave(lock, flags) \
  896. do { flags = __write_seqlock_irqsave(lock); } while (0)
  897. /**
  898. * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
  899. * section
  900. * @sl: Pointer to seqlock_t
  901. * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
  902. *
  903. * write_sequnlock_irqrestore closes the serialized and non-interruptible
  904. * seqlock_t write section previously opened with write_seqlock_irqsave().
  905. */
  906. static inline void
  907. write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
  908. {
  909. do_write_seqcount_end(&sl->seqcount.seqcount);
  910. spin_unlock_irqrestore(&sl->lock, flags);
  911. }
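If readers or other writers can run from hardirq context, the _irqsave pair is used instead; a sketch with the same hypothetical demo_obj:

    static void demo_obj_set_any_context(struct demo_obj *obj, u64 v)
    {
            unsigned long flags;

            write_seqlock_irqsave(&obj->sl, flags);
            obj->value = v;
            write_sequnlock_irqrestore(&obj->sl, flags);
    }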
  912. /**
  913. * read_seqlock_excl() - begin a seqlock_t locking reader section
  914. * @sl: Pointer to seqlock_t
  915. *
  916. * read_seqlock_excl opens a seqlock_t locking reader critical section. A
  917. * locking reader exclusively locks out *both* other writers *and* other
  918. * locking readers, but it does not update the embedded sequence number.
  919. *
  920. * Locking readers act like a normal spin_lock()/spin_unlock().
  921. *
  922. * Context: if the seqlock_t write section, *or other read sections*, can
  923. * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
  924. * variant of this function instead.
  925. *
  926. * The opened read section must be closed with read_sequnlock_excl().
  927. */
  928. static inline void read_seqlock_excl(seqlock_t *sl)
  929. {
  930. spin_lock(&sl->lock);
  931. }
  932. /**
  933. * read_sequnlock_excl() - end a seqlock_t locking reader critical section
  934. * @sl: Pointer to seqlock_t
  935. */
  936. static inline void read_sequnlock_excl(seqlock_t *sl)
  937. {
  938. spin_unlock(&sl->lock);
  939. }
  940. /**
  941. * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
  942. * softirqs disabled
  943. * @sl: Pointer to seqlock_t
  944. *
  945. * _bh variant of read_seqlock_excl(). Use this variant only if the
  946. * seqlock_t write side section, *or other read sections*, can be invoked
  947. * from softirq contexts.
  948. */
  949. static inline void read_seqlock_excl_bh(seqlock_t *sl)
  950. {
  951. spin_lock_bh(&sl->lock);
  952. }
  953. /**
  954. * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
  955. * reader section
  956. * @sl: Pointer to seqlock_t
  957. */
  958. static inline void read_sequnlock_excl_bh(seqlock_t *sl)
  959. {
  960. spin_unlock_bh(&sl->lock);
  961. }
  962. /**
  963. * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
  964. * reader section
  965. * @sl: Pointer to seqlock_t
  966. *
  967. * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
  968. * write side section, *or other read sections*, can be invoked from a
  969. * hardirq context.
  970. */
  971. static inline void read_seqlock_excl_irq(seqlock_t *sl)
  972. {
  973. spin_lock_irq(&sl->lock);
  974. }
  975. /**
  976. * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
  977. * locking reader section
  978. * @sl: Pointer to seqlock_t
  979. */
  980. static inline void read_sequnlock_excl_irq(seqlock_t *sl)
  981. {
  982. spin_unlock_irq(&sl->lock);
  983. }
  984. static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
  985. {
  986. unsigned long flags;
  987. spin_lock_irqsave(&sl->lock, flags);
  988. return flags;
  989. }
  990. /**
  991. * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
  992. * locking reader section
  993. * @lock: Pointer to seqlock_t
  994. * @flags: Stack-allocated storage for saving caller's local interrupt
  995. * state, to be passed to read_sequnlock_excl_irqrestore().
  996. *
  997. * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
  998. * write side section, *or other read sections*, can be invoked from a
  999. * hardirq context.
  1000. */
  1001. #define read_seqlock_excl_irqsave(lock, flags) \
  1002. do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
  1003. /**
  1004. * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
  1005. * locking reader section
  1006. * @sl: Pointer to seqlock_t
  1007. * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
  1008. */
  1009. static inline void
  1010. read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
  1011. {
  1012. spin_unlock_irqrestore(&sl->lock, flags);
  1013. }
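A locking reader simply excludes writers (and other locking readers) instead of retrying; a sketch with the hypothetical demo_obj:

    static u64 demo_obj_read_locked(struct demo_obj *obj)
    {
            u64 v;

            read_seqlock_excl(&obj->sl);    /* behaves like spin_lock(&obj->sl.lock) */
            v = obj->value;
            read_sequnlock_excl(&obj->sl);

            return v;
    }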
  1014. /**
  1015. * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
  1016. * @lock: Pointer to seqlock_t
  1017. * @seq : Marker and return parameter. If the passed value is even, the
  1018. * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
  1019. * If the passed value is odd, the reader will become a *locking* reader
  1020. * as in read_seqlock_excl(). In the first call to this function, the
  1021. * caller *must* initialize and pass an even value to @seq; this way, a
  1022. * lockless read can be optimistically tried first.
  1023. *
  1024. * read_seqbegin_or_lock is an API designed to optimistically try a normal
  1025. * lockless seqlock_t read section first. If an odd counter is found, the
  1026. * lockless read trial has failed, and the next read iteration transforms
  1027. * itself into a full seqlock_t locking reader.
  1028. *
  1029. * This is typically used to avoid seqlock_t lockless reader starvation
  1030. * (too many retry loops) in the case of a sharp spike in write side
  1031. * activity.
  1032. *
  1033. * Context: if the seqlock_t write section, *or other read sections*, can
  1034. * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
  1035. * variant of this function instead.
  1036. *
  1037. * Check Documentation/locking/seqlock.rst for template example code.
  1038. *
  1039. * Return: the encountered sequence counter value, through the @seq
  1040. * parameter, which is overloaded as a return parameter. This returned
  1041. * value must be checked with need_seqretry(). If the read section needs to
  1042. * be retried, this returned value must also be passed as the @seq
  1043. * parameter of the next read_seqbegin_or_lock() iteration.
  1044. */
  1045. static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
  1046. {
  1047. if (!(*seq & 1)) /* Even */
  1048. *seq = read_seqbegin(lock);
  1049. else /* Odd */
  1050. read_seqlock_excl(lock);
  1051. }
  1052. /**
  1053. * need_seqretry() - validate seqlock_t "locking or lockless" read section
  1054. * @lock: Pointer to seqlock_t
  1055. * @seq: sequence count, from read_seqbegin_or_lock()
  1056. *
  1057. * Return: true if a read section retry is required, false otherwise
  1058. */
  1059. static inline int need_seqretry(seqlock_t *lock, int seq)
  1060. {
  1061. return !(seq & 1) && read_seqretry(lock, seq);
  1062. }
  1063. /**
  1064. * done_seqretry() - end seqlock_t "locking or lockless" reader section
  1065. * @lock: Pointer to seqlock_t
  1066. * @seq: count, from read_seqbegin_or_lock()
  1067. *
  1068. * done_seqretry finishes the seqlock_t read side critical section started
  1069. * with read_seqbegin_or_lock() and validated by need_seqretry().
  1070. */
  1071. static inline void done_seqretry(seqlock_t *lock, int seq)
  1072. {
  1073. if (seq & 1)
  1074. read_sequnlock_excl(lock);
  1075. }
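The lockless-or-locking reader combines the three helpers above; a sketch with the hypothetical demo_obj. Note that read_seqbegin() always returns an even value, so a caller that wants a later pass to actually take the lock must set @seq to an odd value itself before retrying:

    static u64 demo_obj_read_or_lock(struct demo_obj *obj)
    {
            int seq = 0;    /* even: start as a lockless reader */
            u64 v;

            do {
                    read_seqbegin_or_lock(&obj->sl, &seq);
                    v = obj->value;
            } while (need_seqretry(&obj->sl, seq));
            done_seqretry(&obj->sl, seq);

            return v;
    }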
  1076. /**
  1077. * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
  1078. * a non-interruptible locking reader
  1079. * @lock: Pointer to seqlock_t
  1080. * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
  1081. *
  1082. * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
  1083. * the seqlock_t write section, *or other read sections*, can be invoked
  1084. * from hardirq context.
  1085. *
  1086. * Note: Interrupts will be disabled only for "locking reader" mode.
  1087. *
  1088. * Return:
  1089. *
  1090. * 1. The saved local interrupts state in case of a locking reader, to
  1091. * be passed to done_seqretry_irqrestore().
  1092. *
  1093. * 2. The encountered sequence counter value, returned through @seq
  1094. * overloaded as a return parameter. Check read_seqbegin_or_lock().
  1095. */
  1096. static inline unsigned long
  1097. read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
  1098. {
  1099. unsigned long flags = 0;
  1100. if (!(*seq & 1)) /* Even */
  1101. *seq = read_seqbegin(lock);
  1102. else /* Odd */
  1103. read_seqlock_excl_irqsave(lock, flags);
  1104. return flags;
  1105. }
  1106. /**
  1107. * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
  1108. * non-interruptible locking reader section
  1109. * @lock: Pointer to seqlock_t
  1110. * @seq: Count, from read_seqbegin_or_lock_irqsave()
  1111. * @flags: Caller's saved local interrupt state in case of a locking
  1112. * reader, also from read_seqbegin_or_lock_irqsave()
  1113. *
  1114. * This is the _irqrestore variant of done_seqretry(). The read section
  1115. * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
  1116. * by need_seqretry().
  1117. */
  1118. static inline void
  1119. done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
  1120. {
  1121. if (seq & 1)
  1122. read_sequnlock_excl_irqrestore(lock, flags);
  1123. }
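Finally, the _irqsave form of the same pattern; a sketch with the hypothetical demo_obj (interrupts are only disabled if the pass is a locking one):

    static u64 demo_obj_read_or_lock_irqsave(struct demo_obj *obj)
    {
            unsigned long flags = 0;
            int seq = 0;    /* even: start as a lockless reader */
            u64 v;

            do {
                    flags = read_seqbegin_or_lock_irqsave(&obj->sl, &seq);
                    v = obj->value;
            } while (need_seqretry(&obj->sl, seq));
            done_seqretry_irqrestore(&obj->sl, seq, flags);

            return v;
    }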
  1124. #endif /* __LINUX_SEQLOCK_H */