kcsan-checks.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KCSAN access checks and modifiers. These can be used to explicitly check
 * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
#define KCSAN_ACCESS_WRITE	(1 << 0) /* Access is a write. */
#define KCSAN_ACCESS_COMPOUND	(1 << 1) /* Compounded read-write instrumentation. */
#define KCSAN_ACCESS_ATOMIC	(1 << 2) /* Access is atomic. */
/* The following are special, and never due to compiler instrumentation. */
#define KCSAN_ACCESS_ASSERT	(1 << 3) /* Access is an assertion. */
#define KCSAN_ACCESS_SCOPED	(1 << 4) /* Access is a scoped access. */

/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be
 * used even in compilation units that selectively disable KCSAN, but must use
 * KCSAN to validate access to an address. Never use these in header files!
 */
#ifdef CONFIG_KCSAN
/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
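
/*
 * Example (illustrative sketch, not part of the original header; buf, len and
 * fill_shared_buf() are hypothetical): a compilation unit that disables KCSAN
 * instrumentation can still ask the runtime to race-check an access it
 * performs on shared data:
 *
 *	void fill_shared_buf(u8 *buf, size_t len)
 *	{
 *		__kcsan_check_access(buf, len, KCSAN_ACCESS_WRITE);
 *		memset(buf, 0, len);	// the write being checked
 *	}
 */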

/*
 * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
 * memory orders to the LKMM memory orders and vice-versa!
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb	__ATOMIC_SEQ_CST
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb	__ATOMIC_ACQ_REL
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb	__ATOMIC_ACQUIRE
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release	__ATOMIC_RELEASE

/**
 * __kcsan_mb - full memory barrier instrumentation
 */
void __kcsan_mb(void);

/**
 * __kcsan_wmb - write memory barrier instrumentation
 */
void __kcsan_wmb(void);

/**
 * __kcsan_rmb - read memory barrier instrumentation
 */
void __kcsan_rmb(void);

/**
 * __kcsan_release - release barrier instrumentation
 */
void __kcsan_release(void);

/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
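
/*
 * Example (illustrative sketch, not part of the original header; stats_counter
 * is hypothetical): suppress reports around a section known to tolerate racy
 * reads, such as a best-effort debug printout:
 *
 *	kcsan_disable_current();
 *	pr_debug("stats: %lu\n", stats_counter);	// stale value is fine
 *	kcsan_enable_current();
 *
 * Because nesting is supported, callers need not know whether KCSAN was
 * already disabled in the current context.
 */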

/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);
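
/*
 * Example (illustrative sketch, not part of the original header; shared_var
 * and compute_update() are hypothetical): plain accesses within the region are
 * treated as if they were marked, so they do not produce reports when racing
 * with marked accesses elsewhere:
 *
 *	kcsan_nestable_atomic_begin();
 *	shared_var = compute_update(shared_var);	// treated as atomic
 *	kcsan_nestable_atomic_end();
 *
 * Nestable regions may themselves contain further begin/end pairs.
 */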

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);

/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * operations.
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);
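
/*
 * Example (illustrative sketch, not part of the original header; shared_var
 * and val are hypothetical): treat only the next plain access as atomic,
 * without annotating the access itself:
 *
 *	kcsan_atomic_next(1);
 *	val = shared_var;	// not reported as a data race
 */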

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);

/* Scoped access information. */
struct kcsan_scoped_access {
	union {
		struct list_head list; /* scoped_accesses list */
		/*
		 * Not an entry in scoped_accesses list; stack depth from where
		 * the access was initialized.
		 */
		int stack_depth;
	};

	/* Access information. */
	const volatile void *ptr;
	size_t size;
	int type;
	/* Location where scoped access was set up. */
	unsigned long ip;
};

/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped \
	__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))

/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
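
/*
 * Example (illustrative sketch, not part of the original header; obj and its
 * lock are hypothetical): continuously check a memory range for the duration
 * of a critical section:
 *
 *	struct kcsan_scoped_access sa;
 *
 *	spin_lock(&obj->lock);
 *	kcsan_begin_scoped_access(obj, sizeof(*obj),
 *				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);
 *	... critical section; concurrent writes to *obj are reported ...
 *	kcsan_end_scoped_access(&sa);
 *	spin_unlock(&obj->lock);
 *
 * The ASSERT_EXCLUSIVE_*_SCOPED() macros below wrap this interface and are
 * usually the more convenient way to set up scoped accesses.
 */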

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
					int type) { }

static inline void __kcsan_mb(void)			{ }
static inline void __kcsan_wmb(void)			{ }
static inline void __kcsan_rmb(void)			{ }
static inline void __kcsan_release(void)		{ }
static inline void kcsan_disable_current(void)		{ }
static inline void kcsan_enable_current(void)		{ }
static inline void kcsan_enable_current_nowarn(void)	{ }
static inline void kcsan_nestable_atomic_begin(void)	{ }
static inline void kcsan_nestable_atomic_end(void)	{ }
static inline void kcsan_flat_atomic_begin(void)	{ }
static inline void kcsan_flat_atomic_end(void)		{ }
static inline void kcsan_atomic_next(int n)		{ }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */

#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else /* __SANITIZE_THREAD__ */
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void)  { }
static inline void __kcsan_disable_current(void) { }
#endif /* __SANITIZE_THREAD__ */

#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
/*
 * Normal barrier instrumentation is not done via explicit calls, but by mapping
 * to a repurposed __atomic_signal_fence(), which normally does not generate any
 * real instructions, but is still intercepted by -fsanitize=thread. This means,
 * like any other compile-time instrumentation, barrier instrumentation can be
 * disabled with the __no_kcsan function attribute.
 *
 * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 *
 * These are all macros, like <asm/barrier.h>, since some architectures use them
 * in non-static inline functions.
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name)                                  \
	do {                                                                   \
		barrier();                                                     \
		__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
		barrier();                                                     \
	} while (0)
#define kcsan_mb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
#define kcsan_wmb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
#define kcsan_rmb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
#define kcsan_release()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
#define kcsan_mb	__kcsan_mb
#define kcsan_wmb	__kcsan_wmb
#define kcsan_rmb	__kcsan_rmb
#define kcsan_release	__kcsan_release
#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
#define kcsan_mb()	do { } while (0)
#define kcsan_wmb()	do { } while (0)
#define kcsan_rmb()	do { } while (0)
#define kcsan_release()	do { } while (0)
#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
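
/*
 * Example (illustrative sketch, not part of the original header, in the spirit
 * of asm-generic/barrier.h; __mb() stands in for the architecture's real
 * barrier): a barrier wrapper instruments the ordering primitive before
 * emitting the actual barrier instruction:
 *
 *	#define mb()	do { kcsan_mb(); __mb(); } while (0)
 */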

/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size)                                         \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * __kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read_write(ptr, size)                                    \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size)                                           \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read_write(ptr, size)                                      \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
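
/*
 * Example (illustrative sketch, not part of the original header, in the spirit
 * of <linux/instrumented.h>): header-only helpers can use the kcsan_check_*()
 * variants, which only call into the runtime when the including compilation
 * unit is itself instrumented:
 *
 *	static __always_inline void instrument_write(const volatile void *v, size_t size)
 *	{
 *		kcsan_check_write(v, size);
 *	}
 */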

/*
 * Check for atomic accesses: if atomic accesses are not ignored, this simply
 * aliases to kcsan_check_access(), otherwise becomes a no-op.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...)		do { } while (0)
#define kcsan_check_atomic_write(...)		do { } while (0)
#define kcsan_check_atomic_read_write(...)	do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size)                                     \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size)                                    \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#define kcsan_check_atomic_read_write(ptr, size)                               \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif

/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *		WRITE_ONCE(shared_foo, ...);
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void reader(void) {
 *		// update_foo_lock does not need to be held!
 *		... = READ_ONCE(shared_foo);
 *	}
 *
 * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
 * checking if a clear scope where no concurrent writes are expected exists.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)

/*
 * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)                               \
	struct kcsan_scoped_access __kcsan_scoped_name(id, _)                 \
		__kcsan_cleanup_scoped;                                        \
	struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)         \
		__maybe_unused = kcsan_begin_scoped_access(                    \
			&(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type),    \
			&__kcsan_scoped_name(id, _))
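
/*
 * For illustration (not part of the original header): with __COUNTER__
 * expanding to e.g. 0, the helper above declares roughly:
 *
 *	struct kcsan_scoped_access __kcsan_scoped_0_ __kcsan_cleanup_scoped;
 *	struct kcsan_scoped_access *__kcsan_scoped_0_dummy_p __maybe_unused =
 *		kcsan_begin_scoped_access(&(var), sizeof(var),
 *					  KCSAN_ACCESS_SCOPED | (type),
 *					  &__kcsan_scoped_0_);
 *
 * i.e. the scoped access starts where the assertion is placed and ends
 * automatically, via the cleanup attribute, when the scope is left.
 */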

/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		{
 *			ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *			WRITE_ONCE(shared_foo, 42);
 *			...
 *			// shared_foo should still be 42 here!
 *		}
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void buggy(void) {
 *		if (READ_ONCE(shared_foo) == 42)
 *			WRITE_ONCE(shared_foo, 1); // bug!
 *	}
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, where exclusive access is expected after determining no other
 * users of an object are left, but the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 *
 * Note:
 *
 * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
 *    checking if a clear scope where no concurrent accesses are expected exists.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)

/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
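
/*
 * Example (illustrative sketch, not part of the original header; obj,
 * do_some_cleanup() and release_for_reuse() are hypothetical, mirroring the
 * ASSERT_EXCLUSIVE_ACCESS() example above): assert exclusive access for the
 * whole teardown path once the last reference has been dropped:
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
 *		do_some_cleanup(obj);		// covered by the scoped assert
 *		release_for_reuse(obj);		// covered by the scoped assert
 *	}
 */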

/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables, where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, we can write the above simply
 * as:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example, where this may be used, is when certain bits of @var may
 * only be modified when holding the appropriate lock, but other bits may still
 * be modified concurrently. Writers, where other bits may change concurrently,
 * could use the assertion as follows:
 *
 * .. code-block:: c
 *
 *	spin_lock(&foo_lock);
 *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *	old_flags = flags;
 *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *	spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
	do {                                                                   \
		kcsan_set_access_mask(mask);                                   \
		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
		kcsan_set_access_mask(0);                                      \
		kcsan_atomic_next(1);                                          \
	} while (0)

#endif /* _LINUX_KCSAN_CHECKS_H */