report.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN reporting.
 *
 * Copyright (C) 2019, Google LLC.
 */

#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>

#include "kcsan.h"
#include "encoding.h"

/*
 * Max. number of stack entries to show in the report.
 */
#define NUM_STACK_ENTRIES 64

/* Common access info. */
struct access_info {
	const volatile void *ptr;
	size_t size;
	int access_type;
	int task_pid;
	int cpu_id;
	unsigned long ip;
};

/*
 * Other thread info: communicated from other racing thread to thread that set
 * up the watchpoint, which then prints the complete report atomically.
 */
struct other_info {
	struct access_info ai;
	unsigned long stack_entries[NUM_STACK_ENTRIES];
	int num_stack_entries;

	/*
	 * Optionally pass @current. Typically we do not need to pass @current
	 * via @other_info since just @task_pid is sufficient. Passing @current
	 * has additional overhead.
	 *
	 * To safely pass @current, we must either use get_task_struct/
	 * put_task_struct, or stall the thread that populated @other_info.
	 *
	 * We cannot rely on get_task_struct/put_task_struct in case
	 * release_report() races with a task being released, and would have to
	 * free it in release_report(). This may result in deadlock if we want
	 * to use KCSAN on the allocators.
	 *
	 * Since we also want to reliably print held locks for
	 * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
	 * that populated @other_info until it has been consumed.
	 */
	struct task_struct *task;
};
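/*
 * Lifecycle of an @other_infos slot (a sketch of the handoff implemented by
 * prepare_report_producer()/prepare_report_consumer() below): the racing
 * thread fills a slot via kcsan_report_set_info(); the thread that set up the
 * watchpoint consumes it in kcsan_report_known_origin(), prints the report,
 * and release_report() marks the slot free again by resetting ai.size to 0.
 */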
/*
 * To never block any producers of struct other_info, we need as many elements
 * as we have watchpoints (upper bound on concurrent races to report).
 */
static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];

/*
 * Information about reported races; used to rate limit reporting.
 */
struct report_time {
	/*
	 * The last time the race was reported.
	 */
	unsigned long time;

	/*
	 * The frames of the 2 threads; if only 1 thread is known, one frame
	 * will be 0.
	 */
	unsigned long frame1;
	unsigned long frame2;
};

/*
 * Since we also want to be able to debug allocators with KCSAN, to avoid
 * deadlock, report_times cannot be dynamically resized with krealloc in
 * rate_limit_report.
 *
 * Therefore, we use a fixed-size array, which at most will occupy a page. This
 * still adequately rate limits reports, assuming that a) number of unique data
 * races is not excessive, and b) occurrence of unique races within the
 * same time window is limited.
 */
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
#define REPORT_TIMES_SIZE                                      \
	(CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ?   \
		 REPORT_TIMES_MAX :                            \
		 CONFIG_KCSAN_REPORT_ONCE_IN_MS)
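/*
 * Worked example (assuming 4 KiB pages and 64-bit longs): struct report_time
 * is 3 * 8 == 24 bytes, so REPORT_TIMES_MAX == 4096 / 24 == 170 entries; with
 * CONFIG_KCSAN_REPORT_ONCE_IN_MS at its default of 3000, REPORT_TIMES_SIZE is
 * therefore capped at 170.
 */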
static struct report_time report_times[REPORT_TIMES_SIZE];

/*
 * Spinlock serializing report generation, and access to @other_infos. Although
 * it could make sense to have a finer-grained locking story for @other_infos,
 * report generation needs to be serialized either way, so not much is gained.
 */
static DEFINE_RAW_SPINLOCK(report_lock);

/*
 * Checks if the race identified by thread frames frame1 and frame2 has
 * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
 */
static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
{
	struct report_time *use_entry = &report_times[0];
	unsigned long invalid_before;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);

	if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
		return false;

	invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);

	/* Check if a matching race report exists. */
	for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
		struct report_time *rt = &report_times[i];

		/*
		 * Must always select an entry for use to store info as we
		 * cannot resize report_times; at the end of the scan, use_entry
		 * will be the oldest entry, which ideally also happened before
		 * KCSAN_REPORT_ONCE_IN_MS ago.
		 */
		if (time_before(rt->time, use_entry->time))
			use_entry = rt;

		/*
		 * Initially, no need to check any further as this entry as
		 * well as following entries have never been used.
		 */
		if (rt->time == 0)
			break;

		/* Check if entry expired. */
		if (time_before(rt->time, invalid_before))
			continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */

		/* Reported recently, check if race matches. */
		if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
		    (rt->frame1 == frame2 && rt->frame2 == frame1))
			return true;
	}

	use_entry->time = jiffies;
	use_entry->frame1 = frame1;
	use_entry->frame2 = frame2;
	return false;
}
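/*
 * For example, once a race between frames (A, B) has been recorded above, a
 * repeat report for either (A, B) or (B, A) within
 * CONFIG_KCSAN_REPORT_ONCE_IN_MS is suppressed; the match deliberately checks
 * both orderings, since the two frames may be observed in either order.
 */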
/*
 * Special rules to skip reporting.
 */
static bool
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
{
	/* Should never get here if value_change==FALSE. */
	WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);

	/*
	 * The first call to skip_report always has value_change==TRUE, since we
	 * cannot know the value written by an instrumented access. For the 2nd
	 * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
	 *
	 * 1. read watchpoint, conflicting write (value_change==TRUE): report;
	 * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
	 * 3. write watchpoint, conflicting write (value_change==TRUE): report;
	 * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
	 * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
	 * 6. write watchpoint, conflicting read (value_change==TRUE): report;
	 *
	 * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
	 * data races where the write may have rewritten the same value; case 6
	 * is possible either if the size is larger than what we check value
	 * changes for or the access type is KCSAN_ACCESS_ASSERT.
	 */
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
	    value_change == KCSAN_VALUE_CHANGE_MAYBE) {
		/*
		 * The access is a write, but the data value did not change.
		 *
		 * We opt-out of this filter for certain functions at request of
		 * maintainers.
		 */
		char buf[64];
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);

		if (!strnstr(buf, "rcu_", len) &&
		    !strnstr(buf, "_rcu", len) &&
		    !strnstr(buf, "_srcu", len))
			return true;
	}

	return kcsan_skip_report_debugfs(top_frame);
}

static const char *get_access_type(int type)
{
	if (type & KCSAN_ACCESS_ASSERT) {
		if (type & KCSAN_ACCESS_SCOPED) {
			if (type & KCSAN_ACCESS_WRITE)
				return "assert no accesses (reordered)";
			else
				return "assert no writes (reordered)";
		} else {
			if (type & KCSAN_ACCESS_WRITE)
				return "assert no accesses";
			else
				return "assert no writes";
		}
	}

	switch (type) {
	case 0:
		return "read";
	case KCSAN_ACCESS_ATOMIC:
		return "read (marked)";
	case KCSAN_ACCESS_WRITE:
		return "write";
	case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "write (marked)";
	case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
		return "read-write";
	case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "read-write (marked)";
	case KCSAN_ACCESS_SCOPED:
		return "read (reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
		return "read (marked, reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
		return "write (reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "write (marked, reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
		return "read-write (reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "read-write (marked, reordered)";
	default:
		BUG();
	}
}
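/*
 * For example, get_access_type(KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC)
 * yields "write (marked)", and a plain type of 0 yields "read"; these strings
 * appear verbatim in the per-thread access lines of the printed report.
 */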
static const char *get_bug_type(int type)
{
	return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
}

/* Return thread description: in task or interrupt. */
static const char *get_thread_desc(int task_id)
{
	if (task_id != -1) {
		static char buf[32]; /* safe: protected by report_lock */

		snprintf(buf, sizeof(buf), "task %i", task_id);
		return buf;
	}
	return "interrupt";
}

/* Helper to skip KCSAN-related functions in stack-trace. */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
	char buf[64];
	char *cur;
	int len, skip;

	for (skip = 0; skip < num_entries; ++skip) {
		len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);

		/* Never show tsan_* or {read,write}_once_size. */
		if (strnstr(buf, "tsan_", len) ||
		    strnstr(buf, "_once_size", len))
			continue;

		cur = strnstr(buf, "kcsan_", len);
		if (cur) {
			cur += strlen("kcsan_");
			if (!str_has_prefix(cur, "test"))
				continue; /* KCSAN runtime function. */
			/* KCSAN related test. */
		}

		/*
		 * No match for runtime functions -- @skip entries to skip to
		 * get to first frame of interest.
		 */
		break;
	}

	return skip;
}
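/*
 * For example, a raw trace beginning with __tsan_write8 and
 * kcsan_setup_watchpoint frames yields a @skip past both, so the printed
 * trace starts at the first frame of the code under test; frames matching
 * "kcsan_test*" are kept, since those belong to the KCSAN test suite rather
 * than the runtime.
 */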
/*
 * Skips to the first entry that matches the function of @ip, and then replaces
 * that entry with @ip, returning the entries to skip with @replaced containing
 * the replaced entry.
 */
static int
replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip,
		    unsigned long *replaced)
{
	unsigned long symbolsize, offset;
	unsigned long target_func;
	int skip;

	if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset))
		target_func = ip - offset;
	else
		goto fallback;

	for (skip = 0; skip < num_entries; ++skip) {
		unsigned long func = stack_entries[skip];

		if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset))
			goto fallback;
		func -= offset;

		if (func == target_func) {
			*replaced = stack_entries[skip];
			stack_entries[skip] = ip;
			return skip;
		}
	}

fallback:
	/* Should not happen; the resulting stack trace is likely misleading. */
	WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip);
	return get_stack_skipnr(stack_entries, num_entries);
}

static int
sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip,
		       unsigned long *replaced)
{
	return ip ? replace_stack_entry(stack_entries, num_entries, ip, replaced) :
		    get_stack_skipnr(stack_entries, num_entries);
}

/* Compares symbolized strings of addr1 and addr2. */
static int sym_strcmp(void *addr1, void *addr2)
{
	char buf1[64];
	char buf2[64];

	snprintf(buf1, sizeof(buf1), "%pS", addr1);
	snprintf(buf2, sizeof(buf2), "%pS", addr2);

	return strncmp(buf1, buf2, sizeof(buf1));
}
static void
print_stack_trace(unsigned long stack_entries[], int num_entries, unsigned long reordered_to)
{
	stack_trace_print(stack_entries, num_entries, 0);
	if (reordered_to)
		pr_err(" |\n +-> reordered to: %pS\n", (void *)reordered_to);
}

static void print_verbose_info(struct task_struct *task)
{
	if (!task)
		return;

	/* Restore IRQ state trace for printing. */
	kcsan_restore_irqtrace(task);

	pr_err("\n");
	debug_show_held_locks(task);
	print_irqtrace_events(task);
}

static void print_report(enum kcsan_value_change value_change,
			 const struct access_info *ai,
			 struct other_info *other_info,
			 u64 old, u64 new, u64 mask)
{
	unsigned long reordered_to = 0;
	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
	int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
	int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip, &reordered_to);
	unsigned long this_frame = stack_entries[skipnr];
	unsigned long other_reordered_to = 0;
	unsigned long other_frame = 0;
	int other_skipnr = 0; /* silence uninit warnings */

	/*
	 * Must check report filter rules before starting to print.
	 */
	if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
		return;

	if (other_info) {
		other_skipnr = sanitize_stack_entries(other_info->stack_entries,
						      other_info->num_stack_entries,
						      other_info->ai.ip, &other_reordered_to);
		other_frame = other_info->stack_entries[other_skipnr];

		/* @value_change is only known for the other thread */
		if (skip_report(value_change, other_frame))
			return;
	}

	if (rate_limit_report(this_frame, other_frame))
		return;

	/* Print report header. */
	pr_err("==================================================================\n");
	if (other_info) {
		int cmp;

		/*
		 * Order functions lexicographically for consistent bug titles.
		 * Do not print offset of functions to keep title short.
		 */
		cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
		pr_err("BUG: KCSAN: %s in %ps / %ps\n",
		       get_bug_type(ai->access_type | other_info->ai.access_type),
		       (void *)(cmp < 0 ? other_frame : this_frame),
		       (void *)(cmp < 0 ? this_frame : other_frame));
	} else {
		pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
		       (void *)this_frame);
	}

	pr_err("\n");

	/* Print information about the racing accesses. */
	if (other_info) {
		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(other_info->ai.access_type), other_info->ai.ptr,
		       other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
		       other_info->ai.cpu_id);

		/* Print the other thread's stack trace. */
		print_stack_trace(other_info->stack_entries + other_skipnr,
				  other_info->num_stack_entries - other_skipnr,
				  other_reordered_to);

		if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
			print_verbose_info(other_info->task);

		pr_err("\n");
		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(ai->access_type), ai->ptr, ai->size,
		       get_thread_desc(ai->task_pid), ai->cpu_id);
	} else {
		pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(ai->access_type), ai->ptr, ai->size,
		       get_thread_desc(ai->task_pid), ai->cpu_id);
	}
	/* Print stack trace of this thread. */
	print_stack_trace(stack_entries + skipnr, num_stack_entries - skipnr, reordered_to);

	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
		print_verbose_info(current);

	/* Print observed value change. */
	if (ai->size <= 8) {
		int hex_len = ai->size * 2;
		u64 diff = old ^ new;

		if (mask)
			diff &= mask;
		if (diff) {
			pr_err("\n");
			pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
			       hex_len, old, hex_len, new);
			if (mask) {
				pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
				       hex_len, diff, hex_len, mask);
			}
		}
	}

	/* Print report footer. */
	pr_err("\n");
	pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
	dump_stack_print_info(KERN_DEFAULT);
	pr_err("==================================================================\n");

	check_panic_on_warn("KCSAN");
}
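/*
 * Illustrative output produced by the pr_err() calls above (function names,
 * addresses, and values are placeholders):
 *
 * ==================================================================
 * BUG: KCSAN: data-race in foo_reader / foo_writer
 *
 * write to 0xffff88810da1a318 of 8 bytes by task 801 on cpu 1:
 *  <stack trace>
 *
 * read to 0xffff88810da1a318 of 8 bytes by task 800 on cpu 0:
 *  <stack trace>
 *
 * value changed: 0x0000000000000000 -> 0x0000000000000001
 *
 * Reported by Kernel Concurrency Sanitizer on:
 * <dump_stack_print_info() output>
 * ==================================================================
 */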
static void release_report(unsigned long *flags, struct other_info *other_info)
{
	/*
	 * Use size to denote valid/invalid, since KCSAN entirely ignores
	 * 0-sized accesses.
	 */
	other_info->ai.size = 0;
	raw_spin_unlock_irqrestore(&report_lock, *flags);
}

/*
 * Sets @other_info->task and awaits consumption of @other_info.
 *
 * Precondition: report_lock is held.
 * Postcondition: report_lock is held.
 */
static void set_other_info_task_blocking(unsigned long *flags,
					 const struct access_info *ai,
					 struct other_info *other_info)
{
	/*
	 * We may be instrumenting a code-path where current->state is already
	 * something other than TASK_RUNNING.
	 */
	const bool is_running = task_is_running(current);
	/*
	 * To avoid deadlock in case we are in an interrupt here and this is a
	 * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
	 * timeout to ensure this works in all contexts.
	 *
	 * Await approximately the worst case delay of the reporting thread (if
	 * we are not interrupted).
	 */
	int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

	other_info->task = current;
	do {
		if (is_running) {
			/*
			 * Let lockdep know the real task is sleeping, to print
			 * the held locks (recall we turned lockdep off, so
			 * locking/unlocking @report_lock won't be recorded).
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
		}
		raw_spin_unlock_irqrestore(&report_lock, *flags);
		/*
		 * We cannot call schedule() since we also cannot reliably
		 * determine if sleeping here is permitted -- see in_atomic().
		 */
		udelay(1);
		raw_spin_lock_irqsave(&report_lock, *flags);
		if (timeout-- < 0) {
			/*
			 * Abort. Reset @other_info->task to NULL, since it
			 * appears the other thread is still going to consume
			 * it. It will result in no verbose info printed for
			 * this task.
			 */
			other_info->task = NULL;
			break;
		}
		/*
		 * If invalid, or if @ptr or @current no longer matches, then
		 * @other_info has been consumed and we may continue. If not,
		 * retry.
		 */
	} while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
		 other_info->task == current);
	if (is_running)
		set_current_state(TASK_RUNNING);
}
/* Populate @other_info; requires that the provided @other_info is not in use. */
static void prepare_report_producer(unsigned long *flags,
				    const struct access_info *ai,
				    struct other_info *other_info)
{
	raw_spin_lock_irqsave(&report_lock, *flags);

	/*
	 * The same @other_infos entry cannot be used concurrently, because
	 * there is a one-to-one mapping to watchpoint slots (@watchpoints in
	 * core.c), and a watchpoint is only released for reuse after reporting
	 * is done by the consumer of @other_info. Therefore, it is impossible
	 * for another concurrent prepare_report_producer() to set the same
	 * @other_info, and we are guaranteed exclusivity for the @other_infos
	 * entry pointed to by @other_info.
	 *
	 * To check this property holds, size should never be non-zero here,
	 * because every consumer of struct other_info resets size to 0 in
	 * release_report().
	 */
	WARN_ON(other_info->ai.size);

	other_info->ai = *ai;
	other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);

	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
		set_other_info_task_blocking(flags, ai, other_info);

	raw_spin_unlock_irqrestore(&report_lock, *flags);
}

/* Awaits producer to fill @other_info and then returns. */
static bool prepare_report_consumer(unsigned long *flags,
				    const struct access_info *ai,
				    struct other_info *other_info)
{
	raw_spin_lock_irqsave(&report_lock, *flags);
	while (!other_info->ai.size) { /* Await valid @other_info. */
		raw_spin_unlock_irqrestore(&report_lock, *flags);
		cpu_relax();
		raw_spin_lock_irqsave(&report_lock, *flags);
	}

	/* Should always have a matching access based on watchpoint encoding. */
	if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
				     (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
		goto discard;

	if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
			     (unsigned long)ai->ptr, ai->size)) {
		/*
		 * If the actual accesses do not match, this was a false
		 * positive due to watchpoint encoding.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
		goto discard;
	}

	return true;

discard:
	release_report(flags, other_info);
	return false;
}
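/*
 * Note on the second matching_access() check above: watchpoints encode
 * accesses with the low address bits masked off (WATCHPOINT_ADDR_MASK), so
 * two distinct addresses can map to the same watchpoint. The consumer
 * therefore re-checks the exact (ptr, size) pair and discards
 * encoding-induced false positives, counting them in
 * KCSAN_COUNTER_ENCODING_FALSE_POSITIVES.
 */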
static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
					      int access_type, unsigned long ip)
{
	return (struct access_info) {
		.ptr = ptr,
		.size = size,
		.access_type = access_type,
		.task_pid = in_task() ? task_pid_nr(current) : -1,
		.cpu_id = raw_smp_processor_id(),
		/* Only replace stack entry with @ip if scoped access. */
		.ip = (access_type & KCSAN_ACCESS_SCOPED) ? ip : 0,
	};
}

void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
			   unsigned long ip, int watchpoint_idx)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
	unsigned long flags;

	kcsan_disable_current();
	lockdep_off(); /* See kcsan_report_known_origin(). */

	prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);

	lockdep_on();
	kcsan_enable_current();
}

void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
			       unsigned long ip, enum kcsan_value_change value_change,
			       int watchpoint_idx, u64 old, u64 new, u64 mask)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
	struct other_info *other_info = &other_infos[watchpoint_idx];
	unsigned long flags = 0;

	kcsan_disable_current();
	/*
	 * Because we may generate reports when we're in scheduler code, the use
	 * of printk() could deadlock. Until such time that all printing code
	 * called in print_report() is scheduler-safe, accept the risk, and just
	 * get our message out. As such, also disable lockdep to hide the
	 * warning, and avoid disabling lockdep for the rest of the kernel.
	 */
	lockdep_off();

	if (!prepare_report_consumer(&flags, &ai, other_info))
		goto out;
	/*
	 * Never report if value_change is FALSE, only when it is
	 * either TRUE or MAYBE. In case of MAYBE, further filtering may
	 * be done once we know the full stack trace in print_report().
	 */
	if (value_change != KCSAN_VALUE_CHANGE_FALSE)
		print_report(value_change, &ai, other_info, old, new, mask);

	release_report(&flags, other_info);
out:
	lockdep_on();
	kcsan_enable_current();
}

void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
				 unsigned long ip, u64 old, u64 new, u64 mask)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
	unsigned long flags;

	kcsan_disable_current();
	lockdep_off(); /* See kcsan_report_known_origin(). */

	raw_spin_lock_irqsave(&report_lock, flags);
	print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
	raw_spin_unlock_irqrestore(&report_lock, flags);

	lockdep_on();
	kcsan_enable_current();
}
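/*
 * Summary of the three entry points above, as used by the KCSAN runtime
 * (core.c): kcsan_report_set_info() is called by the thread whose access
 * raced with an armed watchpoint (the producer); kcsan_report_known_origin()
 * by the thread that set up the watchpoint, which consumes @other_info and
 * prints the two-sided report; kcsan_report_unknown_origin() when only a
 * value change was observed at watchpoint removal, with no racing access
 * identified ("race at unknown origin").
 */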