
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <[email protected]>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

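/*
 * Added note: klp_ops holds one entry per patched function address.  Each
 * entry owns the ftrace_ops registered for that function and a stack
 * (func_stack) of all klp_func structs, from different patches, that target
 * it.  klp_find_ops() returns the entry whose stack patches @old_func, or
 * NULL if no klp_ops entry exists for that function.
 */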
struct klp_ops *klp_find_ops(void *old_func)
{
        struct klp_ops *ops;
        struct klp_func *func;

        list_for_each_entry(ops, &klp_ops, node) {
                func = list_first_entry(&ops->func_stack, struct klp_func,
                                        stack_node);
                if (func->old_func == old_func)
                        return ops;
        }

        return NULL;
}

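/*
 * Added note: this is the ftrace handler attached to every patched function.
 * It normally redirects execution to the klp_func on top of the func_stack
 * (the most recently enabled patch).  While a transition is in progress it
 * instead picks the version that matches the current task's patch_state, so
 * tasks can be migrated to the new code one at a time.
 */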
static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct ftrace_regs *fregs)
{
        struct klp_ops *ops;
        struct klp_func *func;
        int patch_state;
        int bit;

        ops = container_of(fops, struct klp_ops, fops);

        /*
         * The ftrace_test_recursion_trylock() will disable preemption,
         * which is required for the variant of synchronize_rcu() that is
         * used to allow patching functions where RCU is not watching.
         * See klp_synchronize_transition() for more details.
         */
        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (WARN_ON_ONCE(bit < 0))
                return;

        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);

        /*
         * func should never be NULL because preemption should be disabled here
         * and unregister_ftrace_function() does the equivalent of a
         * synchronize_rcu() before the func_stack removal.
         */
        if (WARN_ON_ONCE(!func))
                goto unlock;

        /*
         * In the enable path, enforce the order of the ops->func_stack and
         * func->transition reads.  The corresponding write barrier is in
         * __klp_enable_patch().
         *
         * (Note that this barrier technically isn't needed in the disable
         * path.  In the rare case where klp_update_patch_state() runs before
         * this handler, its TIF_PATCH_PENDING read and this func->transition
         * read need to be ordered.  But klp_update_patch_state() already
         * enforces that.)
         */
        smp_rmb();

        if (unlikely(func->transition)) {
                /*
                 * Enforce the order of the func->transition and
                 * current->patch_state reads.  Otherwise we could read an
                 * out-of-date task state and pick the wrong function.  The
                 * corresponding write barrier is in klp_init_transition().
                 */
                smp_rmb();

                patch_state = current->patch_state;

                WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

                if (patch_state == KLP_UNPATCHED) {
                        /*
                         * Use the previously patched version of the function.
                         * If no previous patches exist, continue with the
                         * original function.
                         */
                        func = list_entry_rcu(func->stack_node.next,
                                              struct klp_func, stack_node);

                        if (&func->stack_node == &ops->func_stack)
                                goto unlock;
                }
        }

        /*
         * NOPs are used to replace existing patches with original code.
         * Do nothing! Setting pc would cause an infinite loop.
         */
        if (func->nop)
                goto unlock;

        ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);

unlock:
        ftrace_test_recursion_unlock(bit);
}

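/*
 * Added note: remove @func from the func_stack of its target function.  When
 * it is the last klp_func on that stack, also unregister the ftrace_ops and
 * free the klp_ops entry.
 */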
static void klp_unpatch_func(struct klp_func *func)
{
        struct klp_ops *ops;

        if (WARN_ON(!func->patched))
                return;
        if (WARN_ON(!func->old_func))
                return;

        ops = klp_find_ops(func->old_func);
        if (WARN_ON(!ops))
                return;

        if (list_is_singular(&ops->func_stack)) {
                unsigned long ftrace_loc;

                ftrace_loc = ftrace_location((unsigned long)func->old_func);
                if (WARN_ON(!ftrace_loc))
                        return;

                WARN_ON(unregister_ftrace_function(&ops->fops));
                WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
        } else {
                list_del_rcu(&func->stack_node);
        }

        func->patched = false;
}

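/*
 * Added note: hook @func into the func_stack of its target function.  The
 * first patch for a given function allocates a klp_ops entry and registers
 * klp_ftrace_handler() with ftrace; subsequent patches simply stack on top
 * of the existing entry.
 */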
static int klp_patch_func(struct klp_func *func)
{
        struct klp_ops *ops;
        int ret;

        if (WARN_ON(!func->old_func))
                return -EINVAL;

        if (WARN_ON(func->patched))
                return -EINVAL;

        ops = klp_find_ops(func->old_func);
        if (!ops) {
                unsigned long ftrace_loc;

                ftrace_loc = ftrace_location((unsigned long)func->old_func);
                if (!ftrace_loc) {
                        pr_err("failed to find location for function '%s'\n",
                               func->old_name);
                        return -EINVAL;
                }

                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
                if (!ops)
                        return -ENOMEM;

                ops->fops.func = klp_ftrace_handler;
                ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
                                  FTRACE_OPS_FL_SAVE_REGS |
#endif
                                  FTRACE_OPS_FL_IPMODIFY |
                                  FTRACE_OPS_FL_PERMANENT;

                list_add(&ops->node, &klp_ops);

                INIT_LIST_HEAD(&ops->func_stack);
                list_add_rcu(&func->stack_node, &ops->func_stack);

                ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
                if (ret) {
                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                               func->old_name, ret);
                        goto err;
                }

                ret = register_ftrace_function(&ops->fops);
                if (ret) {
                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                               func->old_name, ret);
                        ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
                        goto err;
                }

        } else {
                list_add_rcu(&func->stack_node, &ops->func_stack);
        }

        func->patched = true;

        return 0;

err:
        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
        return ret;
}

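/*
 * Added note: unpatch all patched functions in @obj.  When @nops_only is set,
 * only the dynamically allocated NOP funcs are removed; these are the
 * placeholders an atomic-replace patch adds so that functions covered by the
 * replaced patches fall back to their original code.
 */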
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
        struct klp_func *func;

        klp_for_each_func(obj, func) {
                if (nops_only && !func->nop)
                        continue;

                if (func->patched)
                        klp_unpatch_func(func);
        }

        if (obj->dynamic || !nops_only)
                obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
        __klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (WARN_ON(obj->patched))
                return -EINVAL;

        klp_for_each_func(obj, func) {
                ret = klp_patch_func(func);
                if (ret) {
                        klp_unpatch_object(obj);
                        return ret;
                }
        }
        obj->patched = true;

        return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        __klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, true);
}
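
/*
 * Added note: for context, a minimal sketch of the consumer side that the
 * code above services.  It is illustrative only and not part of this file;
 * the patched symbol and replacement function follow the in-tree
 * samples/livepatch/livepatch-sample.c example.  Once klp_enable_patch()
 * succeeds and the transition completes, klp_ftrace_handler() reroutes every
 * call to the old function into the new implementation.
 *
 *      #include <linux/module.h>
 *      #include <linux/kernel.h>
 *      #include <linux/livepatch.h>
 *      #include <linux/seq_file.h>
 *
 *      static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *      {
 *              seq_printf(m, "%s\n", "this has been live patched");
 *              return 0;
 *      }
 *
 *      static struct klp_func funcs[] = {
 *              {
 *                      .old_name = "cmdline_proc_show",
 *                      .new_func = livepatch_cmdline_proc_show,
 *              }, { }
 *      };
 *
 *      static struct klp_object objs[] = {
 *              {
 *                      .funcs = funcs,         (a NULL .name means vmlinux)
 *              }, { }
 *      };
 *
 *      static struct klp_patch patch = {
 *              .mod = THIS_MODULE,
 *              .objs = objs,
 *      };
 *
 *      static int livepatch_init(void)
 *      {
 *              return klp_enable_patch(&patch);
 *      }
 *      module_init(livepatch_init);
 *      MODULE_LICENSE("GPL");
 *      MODULE_INFO(livepatch, "Y");
 */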