// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN debugfs interface.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bsearch.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "kcsan.h"

atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KCSAN_COUNTER_USED_WATCHPOINTS]		= "used_watchpoints",
	[KCSAN_COUNTER_SETUP_WATCHPOINTS]		= "setup_watchpoints",
	[KCSAN_COUNTER_DATA_RACES]			= "data_races",
	[KCSAN_COUNTER_ASSERT_FAILURES]			= "assert_failures",
	[KCSAN_COUNTER_NO_CAPACITY]			= "no_capacity",
	[KCSAN_COUNTER_REPORT_RACES]			= "report_races",
	[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]		= "races_unknown_origin",
	[KCSAN_COUNTER_UNENCODABLE_ACCESSES]		= "unencodable_accesses",
	[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]	= "encoding_false_positives",
};
static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);

/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* if list is a whitelist (else blacklist) */
} report_filterlist = {
	.addrs		= NULL,
	.size		= 8,		/* small initial size */
	.used		= 0,
	.sorted		= false,
	.whitelist	= false,	/* default is blacklist */
};
static DEFINE_SPINLOCK(report_filterlist_lock);
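
/*
 * Example usage of the filter list (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug; 'my_func' is a placeholder for a real function name):
 *
 *	echo '!my_func'  > /sys/kernel/debug/kcsan
 *	echo 'whitelist' > /sys/kernel/debug/kcsan
 *
 * As a blacklist (the default), reports involving listed functions are
 * suppressed; as a whitelist, only reports involving them are shown.
 */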

/*
 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 * debugfs file. This will not generate any conflicts, and tests fast-path only.
 */
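/*
 * For example, a single-threaded run with 1000000 iterations (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *	echo microbench=1000000 > /sys/kernel/debug/kcsan
 */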
static noinline void microbenchmark(unsigned long iters)
{
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	const bool was_enabled = READ_ONCE(kcsan_enabled);
	u64 cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
	/*
	 * Disable to benchmark fast-path for all accesses, and (expected
	 * negligible) call into slow-path, but never set up watchpoints.
	 */
	WRITE_ONCE(kcsan_enabled, false);

	pr_info("%s begin | iters: %lu\n", __func__, iters);

	cycles = get_cycles();
	while (iters--) {
		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);

		__kcsan_check_access((void *)addr, sizeof(long), type);
	}
	cycles = get_cycles() - cycles;

	pr_info("%s end | cycles: %llu\n", __func__, cycles);

	WRITE_ONCE(kcsan_enabled, was_enabled);
	/* restore context */
	current->kcsan_ctx = ctx_save;
}

/* Ascending comparator, used to sort and binary-search the filter list. */
static int cmp_filterlist_addrs(const void *lhs, const void *rhs)
{
	const unsigned long a = *(const unsigned long *)lhs;
	const unsigned long b = *(const unsigned long *)rhs;

	return a < b ? -1 : a == b ? 0 : 1;
}

bool kcsan_skip_report_debugfs(unsigned long func_addr)
{
	unsigned long symbolsize, offset;
	unsigned long flags;
	bool ret = false;

	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
		return false;
	func_addr -= offset; /* Get function start */

	spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == 0)
		goto out;

	/* Sort array if it is unsorted, and then do a binary search. */
	if (!report_filterlist.sorted) {
		sort(report_filterlist.addrs, report_filterlist.used,
		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
		report_filterlist.sorted = true;
	}
	ret = !!bsearch(&func_addr, report_filterlist.addrs,
			report_filterlist.used, sizeof(unsigned long),
			cmp_filterlist_addrs);
	if (report_filterlist.whitelist)
		ret = !ret;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return ret;
}

static void set_report_filterlist_whitelist(bool whitelist)
{
	unsigned long flags;

	spin_lock_irqsave(&report_filterlist_lock, flags);
	report_filterlist.whitelist = whitelist;
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
}

/* Returns 0 on success, error-code otherwise. */
static ssize_t insert_report_filterlist(const char *func)
{
	unsigned long flags;
	unsigned long addr = kallsyms_lookup_name(func);
	ssize_t ret = 0;

	if (!addr) {
		pr_err("could not find function: '%s'\n", func);
		return -ENOENT;
	}

	spin_lock_irqsave(&report_filterlist_lock, flags);

	if (report_filterlist.addrs == NULL) {
		/* initial allocation */
		report_filterlist.addrs =
			kmalloc_array(report_filterlist.size,
				      sizeof(unsigned long), GFP_ATOMIC);
		if (report_filterlist.addrs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (report_filterlist.used == report_filterlist.size) {
		/* resize filterlist */
		size_t new_size = report_filterlist.size * 2;
		unsigned long *new_addrs =
			krealloc(report_filterlist.addrs,
				 new_size * sizeof(unsigned long), GFP_ATOMIC);

		if (new_addrs == NULL) {
			/* leave filterlist itself untouched */
			ret = -ENOMEM;
			goto out;
		}

		report_filterlist.size = new_size;
		report_filterlist.addrs = new_addrs;
	}

	/* Note: deduplicating should be done in userspace. */
	report_filterlist.addrs[report_filterlist.used++] = addr;
	report_filterlist.sorted = false;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return ret;
}
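
/*
 * Illustrative output of reading the "kcsan" debugfs file (counter values here
 * are made up; the counter names correspond to counter_names[] above):
 *
 *	enabled: 1
 *	used_watchpoints: 0
 *	setup_watchpoints: 123
 *	data_races: 4
 *	...
 *	blacklisted functions: none
 */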
static int show_info(struct seq_file *file, void *v)
{
	int i;
	unsigned long flags;

	/* show stats */
	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
		seq_printf(file, "%s: %ld\n", counter_names[i],
			   atomic_long_read(&kcsan_counters[i]));
	}

	/* show filter functions, and filter type */
	spin_lock_irqsave(&report_filterlist_lock, flags);
	seq_printf(file, "\n%s functions: %s\n",
		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
		   report_filterlist.used == 0 ? "none" : "");
	for (i = 0; i < report_filterlist.used; ++i)
		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return 0;
}

static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}
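
/*
 * Summary of the write interface (a reading of the parsing below; see the
 * function body for the authoritative behaviour):
 *
 *	on			- enable KCSAN
 *	off			- disable KCSAN
 *	microbench=<iters>	- run the microbenchmark with <iters> iterations
 *	whitelist		- interpret the filter list as a whitelist
 *	blacklist		- interpret the filter list as a blacklist (default)
 *	!<function>		- add <function> to the filter list
 */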
static ssize_t
debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
	char kbuf[KSYM_NAME_LEN];
	char *arg;
	int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);

	if (copy_from_user(kbuf, buf, read_len))
		return -EFAULT;
	kbuf[read_len] = '\0';
	arg = strstrip(kbuf);

	if (!strcmp(arg, "on")) {
		WRITE_ONCE(kcsan_enabled, true);
	} else if (!strcmp(arg, "off")) {
		WRITE_ONCE(kcsan_enabled, false);
	} else if (str_has_prefix(arg, "microbench=")) {
		unsigned long iters;

		if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
			return -EINVAL;
		microbenchmark(iters);
	} else if (!strcmp(arg, "whitelist")) {
		set_report_filterlist_whitelist(true);
	} else if (!strcmp(arg, "blacklist")) {
		set_report_filterlist_whitelist(false);
	} else if (arg[0] == '!') {
		ssize_t ret = insert_report_filterlist(&arg[1]);

		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	return count;
}

static const struct file_operations debugfs_ops =
{
	.read	 = seq_read,
	.open	 = debugfs_open,
	.write	 = debugfs_write,
	.release = single_release
};

static int __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
	return 0;
}

late_initcall(kcsan_debugfs_init);