// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

extern bool no_hash_pointers;
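
/*
 * Note: no_hash_pointers is defined in lib/vsprintf.c and is set via the
 * "no_hash_pointers" boot parameter; when true, %p prints raw addresses
 * instead of hashed ones. Here it additionally gates whether raw memory
 * contents may be shown in a report (see print_diff_canary() below).
 */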

/* Helper function to either print to a seq_file or to console. */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}
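
/*
 * Illustrative usage: the error-report path below calls
 * kfence_print_object(NULL, meta), so everything goes to the console via
 * vprintk(); the debugfs "objects" file passes a non-NULL seq_file instead,
 * and the same printing code serves both sinks.
 */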

/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
			    const enum kfence_error_type *type)
{
	char buf[64];
	int skipnr, fallback = 0;

	if (type) {
		/* Depending on error type, find different stack entries. */
		switch (*type) {
		case KFENCE_ERROR_UAF:
		case KFENCE_ERROR_OOB:
		case KFENCE_ERROR_INVALID:
			/*
			 * kfence_handle_page_fault() may be called with pt_regs
			 * set to NULL; in that case we'll simply show the full
			 * stack trace.
			 */
			return 0;
		case KFENCE_ERROR_CORRUPTION:
		case KFENCE_ERROR_INVALID_FREE:
			break;
		}
	}

	for (skipnr = 0; skipnr < num_entries; skipnr++) {
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
		    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
			/*
			 * In case of tail calls from any of the below to any of
			 * the above, optimized by the compiler such that the
			 * stack trace would omit the initial entry point below.
			 */
			fallback = skipnr + 1;
		}

		/*
		 * The below list should only include the initial entry points
		 * into the slab allocators. Includes the *_bulk() variants by
		 * checking prefixes.
		 */
		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
			goto found;
	}
	if (fallback < num_entries)
		return fallback;
found:
	skipnr++;
	return skipnr < num_entries ? skipnr : 0;
}
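
/*
 * Illustrative example (hypothetical trace): given an allocation stack
 *
 *	kfence_guarded_alloc	<- KFENCE internal, sets fallback
 *	__kfence_alloc		<- KFENCE internal, sets fallback
 *	__kmalloc		<- allocator entry point, matches below
 *	my_driver_init		<- first interesting entry (hypothetical)
 *
 * the loop matches "__kmalloc" and returns its index + 1, so the printed
 * trace starts at my_driver_init. The fallback covers tail-call-optimized
 * traces where the allocator entry point itself never appears.
 */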

static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
			       bool show_alloc)
{
	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
	u64 ts_sec = track->ts_nsec;
	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);

	/* Timestamp matches printk timestamp format. */
	seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
		       show_alloc ? "allocated" : "freed", track->pid,
		       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);

	if (track->num_stack_entries) {
		/* Skip allocation/free internals stack. */
		int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

		/* stack_trace_seq_print() does not exist; open code our own. */
		for (; i < track->num_stack_entries; i++)
			seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
	} else {
		seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
	}
}
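
/*
 * Example output (format per the seq_con_printf() calls above; task, cpu,
 * time, and frames are made up):
 *
 *	allocated by task 123 on cpu 0 at 26.184607s:
 *	 kmalloc_test+0x24/0x90
 *	 kunit_try_run_case+0x51/0x90
 */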

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
	const int size = abs(meta->size);
	const unsigned long start = meta->addr;
	const struct kmem_cache *const cache = meta->cache;

	lockdep_assert_held(&meta->lock);

	if (meta->state == KFENCE_OBJECT_UNUSED) {
		seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
		return;
	}

	seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
		       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
		       size, (cache && cache->name) ? cache->name : "<destroyed>");

	kfence_print_stack(seq, meta, true);
	if (meta->state == KFENCE_OBJECT_FREED) {
		seq_con_printf(seq, "\n");
		kfence_print_stack(seq, meta, false);
	}
}
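
/*
 * Example object summary (illustrative values; %p prints hashed pointers
 * unless booted with no_hash_pointers):
 *
 *	kfence-#17: 0x00000000aea6f317-0x00000000973ebba2, size=32, cache=kmalloc-32
 *
 * followed by the allocation stack and, for freed objects, the free stack.
 */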

/*
 * Show bytes at @address that are different from the expected canary values,
 * up to @bytes_to_show.
 */
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
			      const struct kfence_metadata *meta)
{
	const unsigned long show_until_addr = address + bytes_to_show;
	const u8 *cur, *end;

	/* Do not show contents of object nor read into following guard page. */
	end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
						: min(show_until_addr, PAGE_ALIGN(address)));

	pr_cont("[");
	for (cur = (const u8 *)address; cur < end; cur++) {
		if (*cur == KFENCE_CANARY_PATTERN(cur))
			pr_cont(" .");
		else if (no_hash_pointers)
			pr_cont(" 0x%02x", *cur);
		else /* Do not leak kernel memory in non-debug builds. */
			pr_cont(" !");
	}
	pr_cont(" ]");
}
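
/*
 * Example (illustrative): for 16 canary bytes of which the 4th and 5th were
 * overwritten, the output is
 *
 *	[ . . . ! ! . . . . . . . . . . . ]
 *
 * or, if booted with no_hash_pointers,
 *
 *	[ . . . 0x3e 0x5a . . . . . . . . . . . ]
 */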

static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;

	if (regs) {
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and just get our message out (given the
	 * system might already behave unpredictably due to the memory error).
	 *
	 * As such, also disable lockdep to hide warnings, and avoid disabling
	 * lockdep for the rest of the kernel.
	 */
	lockdep_off();

	pr_err("==================================================================\n");
	/* Print report header. */
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x%p ", (void *)address);
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%td):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
		       object_index);
		break;
	}

	/* Print stack trace and object info. */
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

	/* Print report footer. */
	pr_err("\n");
	if (no_hash_pointers && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
	pr_err("==================================================================\n");

	lockdep_on();

	check_panic_on_warn("KFENCE");

	/* We encountered a memory safety error, taint the kernel! */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}
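
/*
 * Illustrative report skeleton for KFENCE_ERROR_OOB (frame names and values
 * are made up; the layout follows the pr_err() calls above):
 *
 * ==================================================================
 * BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0xa3/0x22b
 *
 * Out-of-bounds read at 0x00000000d9cc48c5 (1B left of kfence-#72):
 *  test_out_of_bounds_read+0xa3/0x22b
 *  ...
 *
 * kfence-#72: 0x00000000a80e5550-0x00000000a80e559f, size=80, cache=kmalloc-128
 *
 * allocated by task 484 on cpu 0 at 32.919330s:
 *  ...
 * ==================================================================
 */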

#ifdef CONFIG_PRINTK
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
	int i, j;

	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
		kp_stack[j] = (void *)track->stack_entries[i];
	if (j < KS_ADDRS_COUNT)
		kp_stack[j] = NULL;
}
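
/*
 * __kfence_obj_info() below fills a struct kmem_obj_info for a KFENCE-managed
 * address; it is presumably reached via the kmem_dump_obj()/mem_dump_obj()
 * debugging helpers, and returning false signals that the object is not
 * KFENCE-managed, so the slab implementation should handle it instead.
 */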

bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
	unsigned long flags;

	if (!meta)
		return false;

	/*
	 * If state is UNUSED at least show the pointer requested; the rest
	 * would be garbage data.
	 */
	kpp->kp_ptr = object;

	/* Requesting info on a never-used object is almost certainly a bug. */
	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
		return true;

	raw_spin_lock_irqsave(&meta->lock, flags);

	kpp->kp_slab = slab;
	kpp->kp_slab_cache = meta->cache;
	kpp->kp_objp = (void *)meta->addr;
	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
	/* get_stack_skipnr() ensures the first entry is outside allocator. */
	kpp->kp_ret = kpp->kp_stack[0];

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return true;
}

#endif