// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
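
/*
 * Each test below is triggered at runtime by writing its name to the
 * LKDTM debugfs interface (with CONFIG_LKDTM enabled), e.g.:
 *
 *	echo SLAB_LINEAR_OVERFLOW > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * Most of these intentionally corrupt memory or crash, so only run them
 * in a disposable VM.
 */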

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;

/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	two = vzalloc(PAGE_SIZE);
	if (!one || !two) {
		/* vfree(NULL) is a no-op, so this is safe either way. */
		vfree(one);
		vfree(two);
		return;
	}

	pr_info("Attempting vmalloc linear overflow ...\n");
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASAN, or by using
 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);

	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	OPTIMIZER_HIDE_VAR(data);
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}
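
/*
 * Write to slab memory after it has been freed. With a plain allocator
 * this silently corrupts whatever reuses the object; it should be caught
 * by KASAN or by slub_debug poisoning.
 */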
static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
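
/*
 * Read from slab memory after it has been freed. A "win" here means the
 * freed object was wiped or poisoned (e.g. by init_on_free, slub_debug
 * poisoning, or KASAN) so the stale value is no longer visible.
 */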
static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configuration. Store past the first word to
	 * avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
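
/*
 * Same as WRITE_AFTER_FREE, but against a whole page from the buddy
 * allocator instead of a slab object. The schedule() calls give the
 * freed page a chance to be reused elsewhere before the bad write.
 */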
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);

	free_page(p);
	schedule();

	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);

	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
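
/*
 * Same as READ_AFTER_FREE, but against a buddy-allocator page. A "win"
 * means the freed page was scrubbed (e.g. by init_on_free or page
 * poisoning) before it could be read back.
 */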
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);

	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
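
/*
 * Check that a freshly allocated slab object does not still contain the
 * 0xAB pattern written before the previous free, i.e. that init_on_alloc
 * (or an equivalent mitigation) wiped it.
 */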
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	if (val != first)
		pr_warn("Reallocation missed clobbered memory.\n");

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}
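
/*
 * Same check as SLAB_INIT_ON_ALLOC, but for a page from the buddy
 * allocator rather than a slab object.
 */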
static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}
	if (val != first)
		pr_warn("Reallocation missed clobbered memory.\n");

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Page was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}
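
/*
 * Free the same slab object twice. This should be rejected by slab
 * sanity checking (e.g. slub_debug=F) or by KASAN before the freelist
 * is corrupted.
 */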
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}
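
/*
 * Free an object into a cache it was not allocated from. This should be
 * caught by slab consistency checking (e.g. slub_debug's sanity checks).
 */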
static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}
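
/*
 * Pass a buddy-allocator page to kmem_cache_free(). The slab allocator
 * should notice that the pointer does not belong to any slab and reject
 * the free.
 */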
static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	if (!p)
		return;

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};

struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};