- // SPDX-License-Identifier: GPL-2.0-only
- #include <linux/kernel.h>
- #include <linux/mm.h>
- #include <linux/smp.h>
- #include <linux/spinlock.h>
- #include <linux/stop_machine.h>
- #include <linux/uaccess.h>
- #include <asm/cacheflush.h>
- #include <asm/fixmap.h>
- #include <asm/insn.h>
- #include <asm/kprobes.h>
- #include <asm/patching.h>
- #include <asm/sections.h>
/* Serializes users of the FIX_TEXT_POKE0 fixmap slot across CPUs. */
static DEFINE_RAW_SPINLOCK(patch_lock);
- static bool is_exit_text(unsigned long addr)
- {
- /* discarded with init text/data */
- return system_state < SYSTEM_RUNNING &&
- addr >= (unsigned long)__exittext_begin &&
- addr < (unsigned long)__exittext_end;
- }
- static bool is_image_text(unsigned long addr)
- {
- return core_kernel_text(addr) || is_exit_text(addr);
- }
- static void __kprobes *patch_map(void *addr, int fixmap)
- {
- unsigned long uintaddr = (uintptr_t) addr;
- bool image = is_image_text(uintaddr);
- struct page *page;
- if (image)
- page = phys_to_page(__pa_symbol(addr));
- else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
- page = vmalloc_to_page(addr);
- else
- return addr;
- BUG_ON(!page);
- return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
- (uintaddr & ~PAGE_MASK));
- }
/* Tear down the writable alias installed by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
- /*
- * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
- * little-endian.
- */
- int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
- {
- int ret;
- __le32 val;
- ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
- if (!ret)
- *insnp = le32_to_cpu(val);
- return ret;
- }
- static int __kprobes __aarch64_text_write(void *dst, void *src, size_t size)
- {
- unsigned long flags;
- void *waddr;
- int ret;
- raw_spin_lock_irqsave(&patch_lock, flags);
- waddr = patch_map(dst, FIX_TEXT_POKE0);
- ret = copy_to_kernel_nofault(waddr, src, size);
- patch_unmap(FIX_TEXT_POKE0);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
- return ret;
- }
- int __kprobes aarch64_insn_write(void *addr, u32 insn)
- {
- __le32 __insn = cpu_to_le32(insn);
- return __aarch64_text_write(addr, &__insn, AARCH64_INSN_SIZE);
- }
- int __kprobes aarch64_addr_write(void *addr, u64 dst)
- {
- return __aarch64_text_write(addr, &dst, sizeof(dst));
- }
- int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
- {
- u32 *tp = addr;
- int ret;
- /* A64 instructions must be word aligned */
- if ((uintptr_t)tp & 0x3)
- return -EINVAL;
- ret = aarch64_insn_write(tp, insn);
- if (ret == 0)
- caches_clean_inval_pou((uintptr_t)tp,
- (uintptr_t)tp + AARCH64_INSN_SIZE);
- return ret;
- }
/* Work descriptor handed to the stop_machine() patching callback. */
struct aarch64_insn_patch {
	void **text_addrs;	/* instruction addresses to patch */
	u32 *new_insns;		/* replacement instructions, one per address */
	int insn_cnt;		/* number of entries in the two arrays */
	atomic_t cpu_count;	/* rendezvous counter for the callback */
};
/*
 * stop_machine() callback: all online CPUs rendezvous here. The last CPU
 * to arrive does the actual patching while the others spin; once patching
 * is done an extra increment of cpu_count releases the spinners, which
 * then execute an ISB so they fetch the new instructions.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		/* Stop on the first failure; remaining slots are skipped. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/* Wait for the master's release increment (count > ncpus). */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		/* Context synchronization so we see the patched text. */
		isb();
	}

	return ret;
}
- int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
- {
- struct aarch64_insn_patch patch = {
- .text_addrs = addrs,
- .new_insns = insns,
- .insn_cnt = cnt,
- .cpu_count = ATOMIC_INIT(0),
- };
- if (cnt <= 0)
- return -EINVAL;
- return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
- cpu_online_mask);
- }