  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * This code fills the used part of the kernel stack with a poison value
  4. * before returning to userspace. It's part of the STACKLEAK feature
  5. * ported from grsecurity/PaX.
  6. *
  7. * Author: Alexander Popov <[email protected]>
  8. *
  9. * STACKLEAK reduces the information which kernel stack leak bugs can
  10. * reveal and blocks some uninitialized stack variable attacks.
  11. */
  12. #include <linux/stackleak.h>
  13. #include <linux/kprobes.h>
  14. #ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
  15. #include <linux/jump_label.h>
  16. #include <linux/sysctl.h>
  17. #include <linux/init.h>
  18. static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
  19. #ifdef CONFIG_SYSCTL
  20. static int stack_erasing_sysctl(struct ctl_table *table, int write,
  21. void __user *buffer, size_t *lenp, loff_t *ppos)
  22. {
  23. int ret = 0;
  24. int state = !static_branch_unlikely(&stack_erasing_bypass);
  25. int prev_state = state;
  26. table->data = &state;
  27. table->maxlen = sizeof(int);
  28. ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  29. state = !!state;
  30. if (ret || !write || state == prev_state)
  31. return ret;
  32. if (state)
  33. static_branch_disable(&stack_erasing_bypass);
  34. else
  35. static_branch_enable(&stack_erasing_bypass);
  36. pr_warn("stackleak: kernel stack erasing is %s\n",
  37. state ? "enabled" : "disabled");
  38. return ret;
  39. }
/*
 * Table for /proc/sys/kernel/stack_erasing.
 *
 * .data is deliberately NULL: stack_erasing_sysctl() supplies the data
 * pointer and length at access time from the static-key state.
 * extra1/extra2 clamp written values to 0 or 1.  Mode 0600 restricts
 * the knob to root, since toggling erasing has security impact.
 */
static struct ctl_table stackleak_sysctls[] = {
	{
		.procname	= "stack_erasing",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= stack_erasing_sysctl,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}		/* sentinel terminating the table */
};
/* Register the stack_erasing sysctl under the "kernel" directory. */
static int __init stackleak_sysctls_init(void)
{
	register_sysctl_init("kernel", stackleak_sysctls);
	return 0;
}
/* Late in boot: the sysctl core must already be initialized. */
late_initcall(stackleak_sysctls_init);
  58. #endif /* CONFIG_SYSCTL */
  59. #define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
  60. #else
  61. #define skip_erasing() false
  62. #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
  63. static __always_inline void __stackleak_erase(bool on_task_stack)
  64. {
  65. const unsigned long task_stack_low = stackleak_task_low_bound(current);
  66. const unsigned long task_stack_high = stackleak_task_high_bound(current);
  67. unsigned long erase_low, erase_high;
  68. erase_low = stackleak_find_top_of_poison(task_stack_low,
  69. current->lowest_stack);
  70. #ifdef CONFIG_STACKLEAK_METRICS
  71. current->prev_lowest_stack = erase_low;
  72. #endif
  73. /*
  74. * Write poison to the task's stack between 'erase_low' and
  75. * 'erase_high'.
  76. *
  77. * If we're running on a different stack (e.g. an entry trampoline
  78. * stack) we can erase everything below the pt_regs at the top of the
  79. * task stack.
  80. *
  81. * If we're running on the task stack itself, we must not clobber any
  82. * stack used by this function and its caller. We assume that this
  83. * function has a fixed-size stack frame, and the current stack pointer
  84. * doesn't change while we write poison.
  85. */
  86. if (on_task_stack)
  87. erase_high = current_stack_pointer;
  88. else
  89. erase_high = task_stack_high;
  90. while (erase_low < erase_high) {
  91. *(unsigned long *)erase_low = STACKLEAK_POISON;
  92. erase_low += sizeof(unsigned long);
  93. }
  94. /* Reset the 'lowest_stack' value for the next syscall */
  95. current->lowest_stack = task_stack_high;
  96. }
  97. /*
  98. * Erase and poison the portion of the task stack used since the last erase.
  99. * Can be called from the task stack or an entry stack when the task stack is
  100. * no longer in use.
  101. */
  102. asmlinkage void noinstr stackleak_erase(void)
  103. {
  104. if (skip_erasing())
  105. return;
  106. __stackleak_erase(on_thread_stack());
  107. }
  108. /*
  109. * Erase and poison the portion of the task stack used since the last erase.
  110. * Can only be called from the task stack.
  111. */
  112. asmlinkage void noinstr stackleak_erase_on_task_stack(void)
  113. {
  114. if (skip_erasing())
  115. return;
  116. __stackleak_erase(true);
  117. }
  118. /*
  119. * Erase and poison the portion of the task stack used since the last erase.
  120. * Can only be called from a stack other than the task stack.
  121. */
  122. asmlinkage void noinstr stackleak_erase_off_task_stack(void)
  123. {
  124. if (skip_erasing())
  125. return;
  126. __stackleak_erase(false);
  127. }
  128. void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
  129. {
  130. unsigned long sp = current_stack_pointer;
  131. /*
  132. * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
  133. * STACKLEAK_SEARCH_DEPTH makes the poison search in
  134. * stackleak_erase() unreliable. Let's prevent that.
  135. */
  136. BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);
  137. /* 'lowest_stack' should be aligned on the register width boundary */
  138. sp = ALIGN(sp, sizeof(unsigned long));
  139. if (sp < current->lowest_stack &&
  140. sp >= stackleak_task_low_bound(current)) {
  141. current->lowest_stack = sp;
  142. }
  143. }
  144. EXPORT_SYMBOL(stackleak_track_stack);