kasan_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "kasan.h"
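
/*
 * Offset used by some out-of-bounds tests: zero for generic KASAN, which
 * checks accesses with byte precision, and one granule for the tag-based
 * modes, which only catch accesses past the granule-aligned end of an object.
 */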
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_status test_status;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	test_status.report_found = false;
	test_status.sync_fault = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_status", &test_status);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, test_status.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_status". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				"\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    READ_ONCE(test_status.sync_fault))			\
			kasan_enable_hw_tags();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}
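
/* Check that a read one byte before a kmalloc-ed object is reported. */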
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
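
/*
 * Check that an access through a dangling pointer is still reported after
 * another object has been allocated from the same cache: quarantine keeps
 * ptr1 and ptr2 apart in generic mode, while the tag-based modes rely on the
 * second allocation getting a different tag.
 */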
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
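
/*
 * Check that kfree() via a pointer recomputed with page_address() does not
 * produce a false-positive report.
 */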
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
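
/*
 * Same check as above, but with the pointer recovered through a
 * virt_to_phys()/phys_to_virt() round trip.
 */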
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
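
/* Smoke test for a SLAB_ACCOUNT cache; no KASAN report is expected. */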
static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
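
/*
 * Smoke test for the bulk alloc/free API: in-bounds writes to the allocated
 * objects must not be reported.
 */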
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	OPTIMIZER_HIDE_VAR(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}
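
/* Out-of-bounds access past an on-stack array; requires CONFIG_KASAN_STACK. */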
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
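
/*
 * Helpers that run the instrumented bitops on an out-of-bounds address;
 * every operation is expected to produce a KASAN report.
 */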
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
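
/* Check that a second kfree_sensitive() on the same object is reported. */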
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = prandom_u32_max(1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = prandom_u32_max(4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return;

	for (i = 0; i < 256; i++) {
		size = prandom_u32_max(1024) + 1;
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");