patch.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

struct patch_insn {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU
/*
 * fix_to_virt(idx) requires a compile-time constant index; passing a
 * dynamic variable (e.g. one living in register a0) trips its
 * BUILD_BUG_ON() with "idx >= __end_of_fixed_addresses". Hence the
 * '__always_inline' and the 'const unsigned int fixmap' parameter here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);
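
/*
 * A worked example of the offset math above, assuming 4 KiB pages
 * (PAGE_MASK == ~0xfffUL, so ~PAGE_MASK == 0xfffUL): '(uintaddr &
 * ~PAGE_MASK)' keeps only the offset of 'addr' within its page, which
 * is then added to the page's physical base to pick the exact byte the
 * fixmap alias should point at.
 *
 *	uintptr_t uintaddr = 0xffffffff81234567UL;
 *	uintptr_t offset   = uintaddr & 0xfffUL;	// == 0x567
 *	// fixmap target == page_to_phys(page) + offset
 */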

static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * The caller is expected to hold text_mutex before we get here, so
	 * no further locking is needed to keep the cores' view of the text
	 * consistent.
	 *
	 * We're currently using stop_machine() for ftrace & kprobes, and while
	 * that ensures text_mutex is held before installing the mappings it
	 * does not ensure text_mutex is held by the calling thread. That's
	 * safe but triggers a lockdep failure, so just elide it for that
	 * specific case.
	 */
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
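
/*
 * A worked example of the 'across_pages' test, assuming 4 KiB pages: for
 * an addr at page offset 0xffe and len == 4, the sum 0xffe + 4 == 0x1002
 * exceeds PAGE_SIZE (0x1000), so the write straddles a page boundary and
 * the second fixmap slot (FIX_TEXT_POKE1) is mapped for the tail bytes.
 */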
#else
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
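
/*
 * A usage sketch: a hypothetical caller (example_write_nop is not a
 * function from this file) overwriting a call site with the canonical
 * 32-bit nop, 0x00000013 ("addi x0, x0, 0"). Per the locking rules
 * above, the caller takes text_mutex itself; the icache flush inside
 * patch_text_nosync() then makes the new instruction visible to
 * instruction fetch.
 */
#if 0	/* illustrative only */
static int example_write_nop(void *callsite)
{
	u32 nop = 0x00000013;	/* addi x0, x0, 0 */
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_nosync(callsite, &nop, sizeof(nop));
	mutex_unlock(&text_mutex);

	return ret;
}
#endif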

static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		ret = patch_text_nosync(patch->addr, &patch->insn,
					GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
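
/*
 * The rendezvous above can be modelled in plain C11 (a userspace sketch
 * with made-up names, not kernel code): every thread bumps a shared
 * counter; the last arrival (count == NTHREADS) performs the write, then
 * bumps the counter once more so the spinners' 'count <= NTHREADS' test
 * finally fails and they proceed. Sequentially consistent atomics stand
 * in for the smp_mb() pairing.
 *
 *	#include <stdatomic.h>
 *
 *	#define NTHREADS 4
 *	static atomic_int cpu_count;
 *	static unsigned int text_word;		// stands in for the patch site
 *
 *	static void rendezvous_cb(void)		// run once per thread
 *	{
 *		if (atomic_fetch_add(&cpu_count, 1) + 1 == NTHREADS) {
 *			text_word = 0x00000013;		// the "patch"
 *			atomic_fetch_add(&cpu_count, 1);	// release spinners
 *		} else {
 *			while (atomic_load(&cpu_count) <= NTHREADS)
 *				;			// cpu_relax() equivalent
 *		}
 *	}
 */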

int patch_text(void *addr, u32 insn)
{
	int ret;
	struct patch_insn patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	/*
	 * kprobes takes text_mutex before calling patch_text(), but as we
	 * call stop_machine(), the lockdep assertion in patch_insn_write()
	 * gets confused by the context in which the lock is taken.
	 * Instead, ensure the lock is held before calling stop_machine(), and
	 * set riscv_patch_in_stop_machine to skip the check in
	 * patch_insn_write().
	 */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;

	return ret;
}
NOKPROBE_SYMBOL(patch_text);
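
/*
 * GET_INSN_LENGTH() above follows the RISC-V base encoding: a parcel
 * whose two low bits are both set is a 32-bit instruction, anything else
 * is a 16-bit compressed one. A minimal sketch of that check
 * (insn_length is an illustrative name; the kernel's macro lives in the
 * asm headers):
 *
 *	static inline unsigned long insn_length(u32 insn)
 *	{
 *		return (insn & 0x3) == 0x3 ? 4 : 2;
 *	}
 */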