// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure the compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (buf[..] passed as arg)
 * - function may have external effects (memzero_explicit())
 * - no tail recursion possible
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

/* If the depth is negative, use the default, otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}
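
/*
 * The most basic crash triggers: an explicit panic(), a BUG(), warnings with
 * and without a message, a write through a NULL pointer, and an infinite loop.
 */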
static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

static void lkdtm_LOOP(void)
{
	for (;;)
		;
}
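
/*
 * Recurse with roughly REC_STACK_SIZE-sized frames until about twice
 * THREAD_SIZE of stack has been consumed; with stack overflow detection
 * (e.g. VMAP_STACK guard pages) this should kill the task.
 */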
static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}
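
/*
 * Out-of-line helper: writes 64 bytes starting at the caller-supplied stack
 * object, deliberately running past the 8-byte buffers passed in below and
 * clobbering the adjacent stack (including any canary).
 */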
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}
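
/*
 * Report how far a stack variable's address has moved since the first call
 * made by a given pid, e.g. to eyeball the effect of kernel stack offset
 * randomization.
 */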
static pid_t stack_pid;
static unsigned long stack_addr;

static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
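
/*
 * Scan the caller's stack frame for the stack-protector canary and compare
 * the value seen by two different pids: per-task canaries should differ,
 * while a global canary (or a failed lookup) is reported as a failure.
 */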
static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}
		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}

static void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}
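
/*
 * Perform a 32-bit read and write through a deliberately misaligned pointer.
 * Architectures without efficient unaligned access should fault here; where
 * unaligned accesses are cheap the test is expected to fail (XFAIL).
 */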
static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
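
/* Spin forever with preemption disabled to trigger the soft-lockup watchdog. */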
static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}
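
/* Spin forever with local interrupts disabled to trigger the hard-lockup (NMI) watchdog. */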
static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
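
/* Sleep uninterruptibly with no wakeup source to trigger the hung-task detector. */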
static void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
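
/*
 * If the kernel is built with an arithmetic overflow sanitizer, the
 * "Overflowing" addition in each test below is expected to be caught;
 * the volatile stores to "ignored" keep the compiler from optimizing
 * the arithmetic away.
 */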
volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};
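
/*
 * Write one byte past "data" in each struct. The old-style 1-byte flex array
 * is not expected to be instrumented; the fixed-size array in struct
 * array_bounds should trip UBSAN's array bounds checking.
 */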
static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
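
/*
 * Corrupt a list head before calling list_add(): with CONFIG_DEBUG_LIST the
 * corrupted insertion should be detected and rejected before the "target"
 * buffer gets overwritten.
 */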
static void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
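
/*
 * Try to clear CR4.SMEP, first via native_write_cr4() (which CR4 pinning
 * should prevent from taking effect), then by locating the raw "mov ... %cr4"
 * instruction inside native_write_cr4() and calling it directly so the
 * pinning checks are skipped; the post-write verification is expected to
 * notice the change and restore SMEP.
 */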
static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the input keys, the return address,
	 * and the stack pointer. Because the PAC field has relatively few
	 * bits, collisions are possible, so iterate a few times to reduce
	 * the chance of one masking the failure.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}
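
/* The tests provided by this file, grouped into the "bugs" crashtype category for the LKDTM core. */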
static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(BUG),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};