/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_KPROBES_H
#define _LINUX_KPROBES_H
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <[email protected]> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <[email protected]> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <[email protected]> and Jim Keniston
 *		<[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> added function-return probes.
 */
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/refcount.h>
#include <linux/freelist.h>
#include <linux/rethook.h>
#include <asm/kprobes.h>

#ifdef CONFIG_KPROBES

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001
#define KPROBE_HIT_SS		0x00000002
#define KPROBE_REENTER		0x00000004
#define KPROBE_HIT_SSDONE	0x00000008

#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
	int dummy;
};
#endif /* CONFIG_KPROBES */

struct kprobe;
struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
				       unsigned long flags);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
				    struct pt_regs *);
struct kprobe {
	struct hlist_node hlist;

	/* list of kprobes for multi-handler support */
	struct list_head list;

	/* count the number of times this probe was temporarily disarmed */
	unsigned long nmissed;

	/* location of the probe point */
	kprobe_opcode_t *addr;

	/* Allow user to indicate symbol name of the probe point */
	const char *symbol_name;

	/* Offset into the symbol */
	unsigned int offset;

	/* Called before addr is executed. */
	kprobe_pre_handler_t pre_handler;

	/* Called after addr is executed, unless... */
	kprobe_post_handler_t post_handler;

	/* Saved opcode (which has been replaced with breakpoint) */
	kprobe_opcode_t opcode;

	/* copy of the original instruction */
	struct arch_specific_insn ainsn;

	/*
	 * Indicates various status flags.
	 * Protected by kprobe_mutex after this kprobe is registered.
	 */
	u32 flags;
};

/* Kprobe status flags */
#define KPROBE_FLAG_GONE	1 /* breakpoint has already gone */
#define KPROBE_FLAG_DISABLED	2 /* probe is temporarily disabled */
#define KPROBE_FLAG_OPTIMIZED	4 /*
				   * probe is really optimized.
				   * NOTE:
				   * this flag is only for optimized_kprobe.
				   */
#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */

/* Has this kprobe gone? */
static inline bool kprobe_gone(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_GONE;
}

/* Is this kprobe disabled? */
static inline bool kprobe_disabled(struct kprobe *p)
{
	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}

/* Is this kprobe really running the optimized path? */
static inline bool kprobe_optimized(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_OPTIMIZED;
}

/* Does this kprobe use ftrace? */
static inline bool kprobe_ftrace(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_FTRACE;
}
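
/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * module that plants a probe by 'symbol_name' and logs each hit from its
 * 'pre_handler'.  The probed symbol "kernel_clone" and the demo_* names are
 * assumptions made for the sketch; 'struct kprobe' and the register/unregister
 * calls are the ones declared in this header.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		// Runs just before the probed instruction is executed.
 *		pr_info("kprobe hit: %s+0x%x (%p)\n",
 *			p->symbol_name, p->offset, p->addr);
 *		return 0;	// 0: continue with normal single-step handling
 *	}
 *
 *	static struct kprobe demo_kp = {
 *		.symbol_name	= "kernel_clone",	// assumed probe target
 *		.pre_handler	= demo_pre,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return register_kprobe(&demo_kp);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		unregister_kprobe(&demo_kp);
 *	}
 *
 *	module_init(demo_init);
 *	module_exit(demo_exit);
 *	MODULE_LICENSE("GPL");
 */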
/*
 * Function-return probe -
 * Note:
 * User needs to provide a handler function, and initialize maxactive.
 * maxactive - The maximum number of instances of the probed function that
 * can be active concurrently.
 * nmissed - tracks the number of times the probed function's return was
 * ignored, due to maxactive being too low.
 */
struct kretprobe_holder {
	struct kretprobe __rcu *rp;
	refcount_t ref;
};

struct kretprobe {
	struct kprobe kp;
	kretprobe_handler_t handler;
	kretprobe_handler_t entry_handler;
	int maxactive;
	int nmissed;
	size_t data_size;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
	struct rethook *rh;
#else
	struct freelist_head freelist;
	struct kretprobe_holder *rph;
#endif
};

#define KRETPROBE_MAX_DATA_SIZE	4096

struct kretprobe_instance {
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
	struct rethook_node node;
#else
	union {
		struct freelist_node freelist;
		struct rcu_head rcu;
	};
	struct llist_node llist;
	struct kretprobe_holder *rph;
	kprobe_opcode_t *ret_addr;
	void *fp;
#endif
	char data[];
};
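
/*
 * Example (illustrative sketch, not part of the original header): measuring a
 * function's duration with a kretprobe.  'entry_handler' stamps a timestamp
 * into the per-instance 'data' area (sized by 'data_size'), and 'handler'
 * reads it back on return.  The probed symbol "kernel_clone", the demo_*
 * names and the maxactive value are assumptions for the sketch.
 *
 *	#include <linux/kprobes.h>
 *	#include <linux/ktime.h>
 *
 *	static int demo_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		*(ktime_t *)ri->data = ktime_get();
 *		return 0;	// 0: track this instance; non-zero skips it
 *	}
 *
 *	static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		s64 ns = ktime_to_ns(ktime_sub(ktime_get(), *(ktime_t *)ri->data));
 *
 *		pr_info("%s returned after %lld ns\n",
 *			get_kretprobe(ri)->kp.symbol_name, ns);
 *		return 0;
 *	}
 *
 *	static struct kretprobe demo_rp = {
 *		.kp.symbol_name	= "kernel_clone",	// assumed probe target
 *		.entry_handler	= demo_entry,
 *		.handler	= demo_ret,
 *		.data_size	= sizeof(ktime_t),	// <= KRETPROBE_MAX_DATA_SIZE
 *		.maxactive	= 16,			// concurrent instances to allow
 *	};
 *
 * register_kretprobe(&demo_rp) arms the probe and unregister_kretprobe()
 * removes it, just like the plain kprobe sketch above.
 */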
struct kretprobe_blackpoint {
	const char *name;
	void *addr;
};

struct kprobe_blacklist_entry {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
};

#ifdef CONFIG_KPROBES

DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

extern void kprobe_busy_begin(void);
extern void kprobe_busy_end(void);

#ifdef CONFIG_KRETPROBES
/* Check whether @p is used for implementing a trampoline. */
extern int arch_trampoline_kprobe(struct kprobe *p);

#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
	/* rethook::data never changes after setup, so it can be accessed freely. */
	return (struct kretprobe *)ri->node.rethook->data;
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
	return ri->node.ret_addr;
}
#else
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
				   struct pt_regs *regs);
void arch_kretprobe_fixup_return(struct pt_regs *regs,
				 kprobe_opcode_t *correct_ret_addr);

void __kretprobe_trampoline(void);
/*
 * Since some architectures use function descriptors for function pointers,
 * use dereference_kernel_function_descriptor() to get the real function address.
 */
static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
	return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}

/* If the trampoline handler is called from a kprobe, use this version. */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
					     void *frame_pointer);

static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
					   void *frame_pointer)
{
	unsigned long ret;
	/*
	 * Set a dummy kprobe to avoid kretprobe recursion.
	 * Since a kretprobe never runs inside a kprobe handler, no kprobe
	 * can be running at this point.
	 */
	kprobe_busy_begin();
	ret = __kretprobe_trampoline_handler(regs, frame_pointer);
	kprobe_busy_end();

	return ret;
}

static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
	return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
}

static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
	return (unsigned long)ri->ret_addr;
}
#endif /* CONFIG_KRETPROBE_ON_RETHOOK */

#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
					  struct pt_regs *regs)
{
}
static inline int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */
/* Markers of '_kprobe_blacklist' section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

extern struct kretprobe_blackpoint kretprobe_blacklist[];

#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
#else /* !CONFIG_KPROBES_SANITY_TEST */
static inline int init_test_probes(void)
{
	return 0;
}
#endif /* CONFIG_KPROBES_SANITY_TEST */

extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);

extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);

struct kprobe_insn_cache {
	struct mutex mutex;
	void *(*alloc)(void);	/* allocate insn page */
	void (*free)(void *);	/* free insn page */
	const char *sym;	/* symbol for insn pages */
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
extern void __free_insn_slot(struct kprobe_insn_cache *c,
			     kprobe_opcode_t *slot, int dirty);
/* sleep-less address checking routine */
extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
				unsigned long addr);

#define DEFINE_INSN_CACHE_OPS(__name)					\
extern struct kprobe_insn_cache kprobe_##__name##_slots;		\
									\
static inline kprobe_opcode_t *get_##__name##_slot(void)		\
{									\
	return __get_insn_slot(&kprobe_##__name##_slots);		\
}									\
									\
static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
{									\
	__free_insn_slot(&kprobe_##__name##_slots, slot, dirty);	\
}									\
									\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return __is_insn_slot_addr(&kprobe_##__name##_slots, addr);	\
}
#define KPROBE_INSN_PAGE_SYM		"kprobe_insn_page"
#define KPROBE_OPTINSN_PAGE_SYM		"kprobe_optinsn_page"
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym);
#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name)					\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return 0;							\
}
#endif

DEFINE_INSN_CACHE_OPS(insn);
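
/*
 * For reference (sketch derived from the macro above, assuming the
 * __ARCH_WANT_KPROBES_INSN_SLOT case): DEFINE_INSN_CACHE_OPS(insn) expands to
 * roughly the following, giving the 'insn' cache its accessor helpers:
 *
 *	extern struct kprobe_insn_cache kprobe_insn_slots;
 *
 *	static inline kprobe_opcode_t *get_insn_slot(void)
 *	{
 *		return __get_insn_slot(&kprobe_insn_slots);
 *	}
 *
 *	static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
 *	{
 *		__free_insn_slot(&kprobe_insn_slots, slot, dirty);
 *	}
 *
 *	static inline bool is_kprobe_insn_slot(unsigned long addr)
 *	{
 *		return __is_insn_slot_addr(&kprobe_insn_slots, addr);
 *	}
 */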
#ifdef CONFIG_OPTPROBES
/*
 * Internal structure for direct jump optimized probe
 */
struct optimized_kprobe {
	struct kprobe kp;
	struct list_head list;	/* list for optimizing queue */
	struct arch_optimized_insn optinsn;
};

/* Architecture dependent functions for direct jump optimization */
extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
					 struct kprobe *orig);
extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
					kprobe_opcode_t *addr);

extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);

DEFINE_INSN_CACHE_OPS(optinsn);

extern void wait_for_kprobe_optimizer(void);
bool optprobe_queued_unopt(struct optimized_kprobe *op);
bool kprobe_disarmed(struct kprobe *p);
#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#else
static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	return -EINVAL;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */

/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);

/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
	return __this_cpu_read(current_kprobe);
}

static inline void reset_current_kprobe(void)
{
	__this_cpu_write(current_kprobe, NULL);
}

static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
	return this_cpu_ptr(&kprobe_ctlblk);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);

int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);

int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);

#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
#define kprobe_flush_task(tk)	do {} while (0)
#else
void kprobe_flush_task(struct task_struct *tk);
#endif

void kprobe_free_init_mem(void);

int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
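
/*
 * Example (sketch): a registered probe can be turned off and back on at
 * runtime without tearing it down, continuing with the hypothetical demo_kp
 * from the sketch earlier in this file:
 *
 *	disable_kprobe(&demo_kp);	// probe stays registered but is disarmed
 *	...
 *	enable_kprobe(&demo_kp);	// re-arm it; both return 0 on success
 */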
void dump_kprobe(struct kprobe *kp);

void *alloc_insn_page(void);

void *alloc_optinsn_page(void);
void free_optinsn_page(void *page);

int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *sym);
int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
			    char *type, char *sym);

#else /* !CONFIG_KPROBES */

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	return 0;
}
static inline struct kprobe *get_kprobe(void *addr)
{
	return NULL;
}
static inline struct kprobe *kprobe_running(void)
{
	return NULL;
}
#define kprobe_busy_begin()	do {} while (0)
#define kprobe_busy_end()	do {} while (0)

static inline int register_kprobe(struct kprobe *p)
{
	return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
	return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
}
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
static inline int register_kretprobe(struct kretprobe *rp)
{
	return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
	return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
static inline void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
static inline void kprobe_free_init_mem(void)
{
}
static inline int disable_kprobe(struct kprobe *kp)
{
	return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
	return -EOPNOTSUPP;
}

static inline bool within_kprobe_blacklist(unsigned long addr)
{
	return true;
}

static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
				     char *type, char *sym)
{
	return -ERANGE;
}
#endif /* CONFIG_KPROBES */

static inline int disable_kretprobe(struct kretprobe *rp)
{
	return disable_kprobe(&rp->kp);
}
static inline int enable_kretprobe(struct kretprobe *rp)
{
	return enable_kprobe(&rp->kp);
}

#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
{
	return false;
}
#endif /* !CONFIG_KPROBES */

#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
	return false;
}
#endif /* !CONFIG_OPTPROBES */

#ifdef CONFIG_KRETPROBES
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return is_rethook_trampoline(addr);
}

static nokprobe_inline
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur)
{
	return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
}
#else
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return (void *)addr == kretprobe_trampoline_addr();
}

unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur);
#endif
#else
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return false;
}

static nokprobe_inline
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur)
{
	return 0;
}
#endif
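
/*
 * Example (sketch, with hypothetical 'addr'/'task'/'frame' variables): a
 * stack unwinder that encounters a return address pointing at the kretprobe
 * trampoline can translate it back into the real return address:
 *
 *	struct llist_node *cur = NULL;
 *
 *	if (is_kretprobe_trampoline(addr))
 *		addr = kretprobe_find_ret_addr(task, frame, &cur);
 *
 * Passing 'cur' back in on successive calls walks nested kretprobe instances
 * on the same task in order.
 */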
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
					      unsigned int trap)
{
	if (!IS_ENABLED(CONFIG_KPROBES))
		return false;
	if (user_mode(regs))
		return false;
	/*
	 * To be potentially processing a kprobe fault and to be allowed
	 * to call kprobe_running(), we have to be non-preemptible.
	 */
	if (preemptible())
		return false;
	if (!kprobe_running())
		return false;
	return kprobe_fault_handler(regs, trap);
}
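
/*
 * Example (sketch of how architecture fault handlers use the helper above;
 * the trap number macro is an arch-specific assumption): the page-fault path
 * gives kprobes a chance to fix up faults raised while single-stepping a
 * probed instruction before treating the fault as a real one:
 *
 *	if (kprobe_page_fault(regs, X86_TRAP_PF))
 *		return;
 *	// ...continue with normal page-fault handling...
 */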
#endif /* _LINUX_KPROBES_H */