/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>
#include <linux/objtool.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *	stack. This ensures that the various unwinders can link back to the
 *	original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument).
 *
 *   4. Pop the original stack pointer from the top of the irq stack,
 *	which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *	The @tos variable holds a pointer to the top of the irq stack and
 *	_must_ be allocated in a non-callee-saved register, as this is a
 *	restriction coming from objtool.
 *
 *	Note that @tos appears in both the input and output constraints to
 *	ensure that the compiler does not assume that R11 is left untouched
 *	in case this macro is used in some place where the per-CPU interrupt
 *	stack pointer is used again afterwards.
 *
 * - Function arguments:
 *	The function argument(s), if any, have to be defined in register
 *	variables at the place where this is invoked. Storing the
 *	argument(s) in the proper register(s) is part of @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be done very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *	As documented above, the @tos variable is required to be in the
 *	output constraints to make the compiler aware that R11 cannot be
 *	reused after the asm() statement.
 *
 *	For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT
 *	is required as well, as it prevents certain creative GCC variants
 *	from misplacing the asm code.
 *
 *   input:
 *	- func:
 *	  Immediate, which tells the compiler that the function is referenced.
 *
 *	- tos:
 *	  Register. The actual register is defined by the variable declaration.
 *
 *	- function arguments:
 *	  The constraints are handed in via the 'argconstr' argument list.
 *	  They describe the register arguments which are used in @asm_call.
 *
 *   clobbers:
 *	Function calls can clobber anything except the callee-saved
 *	registers. Tell the compiler.
 */
#define call_on_stack(stack, func, asm_call, argconstr...)		\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)(stack));					\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
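
/*
 * Illustrative sketch only (not compiled): invoking call_on_stack()
 * directly for a one-argument handler, with a struct pt_regs *regs in
 * scope. Because @argconstr is pasted after the last input constraint,
 * it has to expand with its own leading comma, which is why callers
 * wrap the constraint list in a helper macro instead of spelling it
 * out at the call site. my_handler and MY_CONSTRAINTS are hypothetical
 * names; the real users are the wrappers below.
 *
 *	#define MY_CONSTRAINTS	, [arg1] "r" (regs)
 *
 *	call_on_stack(__this_cpu_read(hardirq_stack_ptr),
 *		      my_handler, ASM_CALL_ARG1, MY_CONSTRAINTS);
 */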

#define ASM_CALL_ARG0							\
	"call %P[__func]				\n"		\
	ASM_REACHABLE

#define ASM_CALL_ARG1							\
	"movq	%[arg1], %%rdi				\n"		\
	ASM_CALL_ARG0

#define ASM_CALL_ARG2							\
	"movq	%[arg2], %%rsi				\n"		\
	ASM_CALL_ARG1

#define ASM_CALL_ARG3							\
	"movq	%[arg3], %%rdx				\n"		\
	ASM_CALL_ARG2
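
/*
 * For reference, a sketch of what ASM_CALL_ARG2 expands to via
 * ASM_CALL_ARG1 and ASM_CALL_ARG0: the System V AMD64 argument
 * registers are loaded highest-numbered first, then the function is
 * called:
 *
 *	"movq	%[arg2], %%rsi				\n"
 *	"movq	%[arg1], %%rdi				\n"
 *	"call %P[__func]				\n"
 *	ASM_REACHABLE
 */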

#define call_on_irqstack(func, asm_call, argconstr...)			\
	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
		      func, asm_call, argconstr)

/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))
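
/*
 * A minimal sketch of what these assertions catch, with a hypothetical
 * handler: a prototype mismatch fails the build instead of silently
 * feeding a wrongly typed pointer into the asm call sequence:
 *
 *	static void bad_handler(struct pt_regs *regs, u64 vector);
 *
 *	assert_function_type(bad_handler, void (*)(struct pt_regs *, u32));
 *		--> error: static assertion failed
 */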

/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * Entries from user mode and interrupts which arrived on the	\
	 * irq stack do not switch stacks. If the entry came from user	\
	 * mode, the task stack is empty.				\
	 */								\
	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}								\
}

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * constraint mechanism because these functions are global and cannot be
 * optimized out when compiling a particular source file which uses one
 * of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a
 * callee-saved register to be safe vs. the irq_enter_rcu() call because
 * the clobbers already prevent the compiler from storing it in a
 * callee-clobbered register. As the compiler has to preserve @regs for
 * the final call to idtentry_exit() anyway, it's likely that it does
 * not cause extra effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG1							\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));	\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}
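
/*
 * Typical call site, a simplified sketch of what DEFINE_IDTENTRY_SYSVEC
 * in <asm/idtentry.h> generates (sysvec_foo()/__sysvec_foo() are
 * placeholder names, instrumentation details elided):
 *
 *	__visible noinstr void sysvec_foo(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		run_sysvec_on_irqstack_cond(__sysvec_foo, regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */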

/*
 * As in ASM_CALL_SYSVEC above, the clobbers force the compiler to store
 * @regs and @vector in callee-saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG2							\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}
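
/*
 * Typical call site, a simplified sketch of what DEFINE_IDTENTRY_IRQ in
 * <asm/idtentry.h> generates, e.g. for common_interrupt(): the vector
 * arrives in the error code slot and is truncated to the low 8 bits
 * before the handoff (entry/exit details elided):
 *
 *	__visible noinstr void common_interrupt(struct pt_regs *regs,
 *						unsigned long error_code)
 *	{
 *		u32 vector = (u32)(u8)error_code;
 *
 *		...
 *		run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
 *		...
 *	}
 */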

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be
 * in use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
	__this_cpu_write(hardirq_stack_inuse, false);			\
}
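
/*
 * A sketch of the calling pattern in kernel/softirq.c, e.g. from
 * do_softirq() (simplified; the caller has interrupts disabled and has
 * already checked that it does not run from hard interrupt context):
 *
 *	if (local_softirq_pending())
 *		do_softirq_own_stack();
 */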

#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */

#else /* CONFIG_X86_64 */

/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif /* _ASM_X86_IRQ_STACK_H */