// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>

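/* Serializes writes to kernel text through the text-poke fixmap slot. */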
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

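/*
 * Map the page containing @addr at the given fixmap slot so that read-only
 * text can be written through a writable alias: image text is resolved via
 * its physical address, module text (with CONFIG_STRICT_MODULE_RWX) via
 * vmalloc_to_page(). Without strict module RWX the address is already
 * writable and is returned as-is.
 */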
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

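/*
 * Copy @size bytes of @src into kernel text at @dst through the writable
 * fixmap alias. patch_lock serializes users of the fixmap slot and keeps
 * IRQs disabled for the duration of the write.
 */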
static int __kprobes __aarch64_text_write(void *dst, void *src, size_t size)
{
	unsigned long flags;
	void *waddr;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);

	waddr = patch_map(dst, FIX_TEXT_POKE0);
	ret = copy_to_kernel_nofault(waddr, src, size);
	patch_unmap(FIX_TEXT_POKE0);

	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	__le32 __insn = cpu_to_le32(insn);

	return __aarch64_text_write(addr, &__insn, AARCH64_INSN_SIZE);
}

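/*
 * Write a 64-bit doubleword (e.g. a pointer literal) into otherwise
 * read-only kernel memory.
 */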
int __kprobes aarch64_addr_write(void *addr, u64 dst)
{
	return __aarch64_text_write(addr, &dst, sizeof(dst));
}

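/*
 * Patch a single instruction without synchronizing other CPUs: write the
 * new instruction, then clean/invalidate the caches to the Point of
 * Unification so subsequent fetches see it. The caller must ensure that
 * no other CPU can be concurrently executing the patched instruction
 * unless the old/new encodings are safe to modify concurrently.
 */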
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		caches_clean_inval_pou((uintptr_t)tp,
				       (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

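/* Work passed to the stop_machine() callback below. */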
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

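/*
 * Runs on every online CPU under stop_machine(). The last CPU to arrive
 * becomes the master and performs the patching; the others spin until the
 * master signals completion with an extra increment of cpu_count, then
 * execute an isb() to discard any stale, prefetched copies of the patched
 * instructions.
 */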
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

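/*
 * Patch @cnt instructions under stop_machine() so that no CPU can be
 * executing the affected code while it is being rewritten.
 */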
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
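
/*
 * Usage sketch (hypothetical caller, not part of this file): "site" and
 * "new_insn" below stand in for a patch location and its replacement
 * encoding. To swap one instruction while other CPUs are live:
 *
 *	void *addrs[] = { (void *)site };
 *	u32 insns[] = { new_insn };
 *	int err = aarch64_insn_patch_text(addrs, insns, 1);
 *
 * aarch64_insn_patch_text_nosync() is only safe when no other CPU can be
 * executing the patched instruction concurrently.
 */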