task_stack.h — include/linux/sched/task_stack.h (Linux kernel)
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_SCHED_TASK_STACK_H
  3. #define _LINUX_SCHED_TASK_STACK_H
  4. /*
  5. * task->stack (kernel stack) handling interfaces:
  6. */
  7. #include <linux/sched.h>
  8. #include <linux/magic.h>
  9. #ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static __always_inline void *task_stack_page(const struct task_struct *task)
{
	/* With CONFIG_THREAD_INFO_IN_TASK, task->stack points straight at the stack. */
	return task->stack;
}
/* thread_info lives inside task_struct here, so there is nothing to copy. */
#define setup_thread_stack(new,old) do { } while(0)
/*
 * Address of the last usable long on the stack: the word holding the
 * STACK_END_MAGIC sentinel (see task_stack_end_corrupted()).
 */
static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
	/* Upward-growing stack: the end is the topmost long of the area. */
	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
#else
	/* Downward-growing stack: the end is the base of the allocation. */
	return task->stack;
#endif
}
  28. #elif !defined(__HAVE_THREAD_FUNCTIONS)
/* Legacy layout: task->stack is the base of the stack/thread_info allocation. */
#define task_stack_page(task) ((void *)(task)->stack)
/* Initialize the child's thread_info from the parent's at fork time. */
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	/* Copy the parent's thread_info wholesale ... */
	*task_thread_info(p) = *task_thread_info(org);
	/* ... then repoint the back-reference at the new task. */
	task_thread_info(p)->task = p;
}
/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	/* Highest long within the THREAD_SIZE area that starts at thread_info. */
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	/* First long immediately above the thread_info struct. */
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
  52. #endif
  53. #ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * Take a reference on @tsk's stack so it cannot be freed while in use.
 * Returns the stack page on success, or NULL if the stack has already
 * been released (refcount dropped to zero). Pair with put_task_stack().
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return refcount_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}
  59. extern void put_task_stack(struct task_struct *tsk);
  60. #else
/*
 * Stacks are not refcounted in this configuration, so "getting" the
 * stack always succeeds and put_task_stack() is a no-op.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}
/* No stack refcounting in this configuration; nothing to release. */
static inline void put_task_stack(struct task_struct *tsk) {}
  66. #endif
  67. void exit_task_stack_account(struct task_struct *tsk);
/* True if the sentinel word at the stack end has been overwritten. */
#define task_stack_end_corrupted(task) \
	(*(end_of_stack(task)) != STACK_END_MAGIC)
  70. static inline int object_is_on_stack(const void *obj)
  71. {
  72. void *stack = task_stack_page(current);
  73. return (obj >= stack) && (obj < (stack + THREAD_SIZE));
  74. }
  75. extern void thread_stack_cache_init(void);
  76. #ifdef CONFIG_DEBUG_STACK_USAGE
/*
 * Lower-bound estimate of how many bytes of @p's stack were never
 * written: scan from the stack end, past the magic/canary word and any
 * still-zero words, until the first non-zero word is found.
 *
 * NOTE(review): assumes the unused region is still zero-filled and @p
 * is not concurrently running on this stack — confirm at call sites.
 */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);
	do { /* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);
	/* Distance from the stack end to the first touched word, in bytes. */
# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
  93. #endif
  94. extern void set_task_stack_end_magic(struct task_struct *tsk);
  95. #ifndef __HAVE_ARCH_KSTACK_END
  96. static inline int kstack_end(void *addr)
  97. {
  98. /* Reliable end of stack detection:
  99. * Some APM bios versions misalign the stack
  100. */
  101. return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
  102. }
  103. #endif
  104. #endif /* _LINUX_SCHED_TASK_STACK_H */