extable.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>
/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
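
/*
 * Illustrative use only (a sketch, not code from this file): any
 * dynamic code-patching path is expected to serialize on text_mutex
 * around the modification itself, roughly:
 *
 *	mutex_lock(&text_mutex);
 *	... patch kernel text via the arch-specific helper ...
 *	mutex_unlock(&text_mutex);
 */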
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}
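
/*
 * For reference, a sketch of what each table entry pairs together
 * (the exact layout is arch- and config-dependent; many architectures
 * use relative offsets so the table needs no relocation):
 *
 *	struct exception_table_entry {
 *		int insn;	// offset of the potentially-faulting insn
 *		int fixup;	// offset of the recovery (fixup) code
 *	};
 */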
/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}
/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);

	return e;
}
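
/*
 * Illustrative caller (a sketch; the real lookup lives in each arch's
 * fault-handling path, e.g. fixup_exception() on x86): on a kernel-mode
 * fault, the handler checks whether the faulting instruction has a
 * registered fixup before declaring an oops:
 *
 *	e = search_exception_tables(instruction_pointer(regs));
 *	if (e)
 *		... redirect execution to the entry's fixup ...
 */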
/*
 * Check whether @addr lies in the core kernel's text. Init text only
 * counts while init memory has not yet been freed.
 */
int notrace core_kernel_text(unsigned long addr)
{
	if (is_kernel_text(addr))
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}
int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}
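
/*
 * Illustrative caller (a sketch, not code from this file): stack
 * unwinders filter candidate return addresses through
 * __kernel_text_address() before symbolizing them, e.g.:
 *
 *	if (__kernel_text_address(addr))
 *		printk("%pS\n", (void *)addr);
 */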
int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified to start watching again. This
	 * can happen either from tracing that triggers a stack
	 * trace, or from a WARN() raised while coming back from
	 * idle, or while a CPU is coming online or going offline.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() all
	 * require RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		ct_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		ct_nmi_exit();

	return ret;
}
/*
 * On some architectures (PPC64, IA64, PARISC) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
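/*
 * For illustration (a sketch of the PPC64 ELFv1 case; names differ
 * per architecture): the "token" is a descriptor that bundles the
 * real entry point with per-function state, roughly
 *
 *	struct func_desc {
 *		unsigned long addr;	// actual entry point
 *		unsigned long toc;	// TOC base for the callee
 *		unsigned long env;	// environment pointer
 *	};
 *
 * and only ->addr is dereferenced below.
 */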
#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
void *dereference_function_descriptor(void *ptr)
{
	func_desc_t *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}
EXPORT_SYMBOL_GPL(dereference_function_descriptor);

void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
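
/*
 * Illustrative caller (a sketch): debugging code can use this to
 * sanity-check that a callback really points at kernel or module text
 * before invoking it, roughly what CONFIG_DEBUG_NOTIFIERS does:
 *
 *	if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call)))
 *		WARN(1, "Invalid notifier called!");
 */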