mmio-mod.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <[email protected]>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list); /* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

/* module parameters */
static unsigned long filter_offset;
static bool nommiotrace;
static bool trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

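/*
 * kmmio pre-fault handler: called when an instruction touches a traced
 * mapping, before that instruction is single-stepped. Decode the access and
 * fill the per-cpu mmiotrace_rw record; for reads the value is not known yet
 * and is picked up in post().
 */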
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

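/*
 * kmmio post-fault handler: called after the faulting instruction has been
 * single-stepped. For reads, fetch the value that landed in the destination
 * register, then hand the completed record to the trace buffer.
 */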
static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

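/*
 * Record a fresh ioremap: log an MMIO_PROBE mapping event and, unless
 * nommiotrace is set, arm a kmmio probe covering the whole mapping so that
 * accesses through it fault into pre()/post() above.
 */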
static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}

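/*
 * Record an iounmap: disarm and forget the kmmio probe registered for this
 * mapping (if any) and log a matching MMIO_UNPROBE event.
 */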
static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;

	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

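/*
 * Purge probes for mappings that were never iounmapped. Called from
 * disable_mmiotrace() with tracing already switched off, so the list cannot
 * change under us.
 */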
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

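/*
 * Tracing relies on the kmmio page-fault machinery catching every access to
 * the traced mappings, which is only reliable while a single CPU is online.
 * The other CPUs are therefore taken offline for the duration of tracing,
 * or a warning is printed when CPU hotplug is not available.
 */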
#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	cpus_read_lock();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	cpus_read_unlock();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

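/*
 * Turn tracing on: set up kmmio, drop to a single CPU, then flip
 * mmiotrace_enabled under trace_lock so the ioremap/iounmap hooks and the
 * fault handlers start recording.
 */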
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

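/*
 * Turn tracing off: clear the enabled flag under trace_lock so no new events
 * are produced, purge probes for mappings that were never iounmapped, bring
 * the other CPUs back online and release kmmio resources.
 */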
void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}