// SPDX-License-Identifier: GPL-2.0
// error-inject.c: Function-level error injection table
#include <linux/error-injection.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>

/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
static DEFINE_MUTEX(ei_mutex);

struct ei_entry {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
	int etype;
	void *priv;
};
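
/*
 * Return true if @addr lies inside a function that has been registered on
 * the error injection whitelist.
 */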
bool within_error_injection_list(unsigned long addr)
{
	struct ei_entry *ent;
	bool ret = false;

	mutex_lock(&ei_mutex);
	list_for_each_entry(ent, &error_injection_list, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&ei_mutex);
	return ret;
}
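
/*
 * Return the error type (EI_ETYPE_*) recorded for the function containing
 * @addr, or EI_ETYPE_NONE if the address is not whitelisted.
 */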
int get_injectable_error_type(unsigned long addr)
{
	struct ei_entry *ent;
	int ei_type = EI_ETYPE_NONE;

	mutex_lock(&ei_mutex);
	list_for_each_entry(ent, &error_injection_list, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr) {
			ei_type = ent->etype;
			break;
		}
	}
	mutex_unlock(&ei_mutex);

	return ei_type;
}

/*
 * Lookup and populate the error_injection_list.
 *
 * For safety reasons we only allow certain functions to be overridden with
 * error injection, so we need to populate the list of the symbols that have
 * been marked as safe for overriding.
 */
static void populate_error_injection_list(struct error_injection_entry *start,
					  struct error_injection_entry *end,
					  void *priv)
{
	struct error_injection_entry *iter;
	struct ei_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	mutex_lock(&ei_mutex);
	for (iter = start; iter < end; iter++) {
		entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find error inject entry at %p\n",
			       (void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			break;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		ent->etype = iter->etype;
		ent->priv = priv;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &error_injection_list);
	}
	mutex_unlock(&ei_mutex);
}

/* Markers of the _error_inject_whitelist section */
extern struct error_injection_entry __start_error_injection_whitelist[];
extern struct error_injection_entry __stop_error_injection_whitelist[];
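
/*
 * Entries land in that section when a function opts in with the
 * ALLOW_ERROR_INJECTION() macro; a sketch of the usage (some_func is an
 * illustrative name, not a symbol in this file):
 *
 *	ALLOW_ERROR_INJECTION(some_func, ERRNO);
 */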

static void __init populate_kernel_ei_list(void)
{
	populate_error_injection_list(__start_error_injection_whitelist,
				      __stop_error_injection_whitelist,
				      NULL);
}
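
/*
 * Modules can carry their own whitelist entries (mod->ei_funcs); add them
 * while the module is loading and drop them again when it is unloaded.
 */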
#ifdef CONFIG_MODULES
static void module_load_ei_list(struct module *mod)
{
	if (!mod->num_ei_funcs)
		return;

	populate_error_injection_list(mod->ei_funcs,
				      mod->ei_funcs + mod->num_ei_funcs, mod);
}

static void module_unload_ei_list(struct module *mod)
{
	struct ei_entry *ent, *n;

	if (!mod->num_ei_funcs)
		return;

	mutex_lock(&ei_mutex);
	list_for_each_entry_safe(ent, n, &error_injection_list, list) {
		if (ent->priv == mod) {
			list_del_init(&ent->list);
			kfree(ent);
		}
	}
	mutex_unlock(&ei_mutex);
}

/* Module notifier callback, checking the error injection table of the module */
static int ei_module_callback(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_COMING)
		module_load_ei_list(mod);
	else if (val == MODULE_STATE_GOING)
		module_unload_ei_list(mod);

	return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
	.notifier_call = ei_module_callback,
	.priority = 0
};

static __init int module_ei_init(void)
{
	return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init()	(0)
#endif /* CONFIG_MODULES */

/*
 * error_injection/list -- shows which functions can be overridden for
 * error injection.
 */
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ei_mutex);
	return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &error_injection_list, pos);
}
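
/* Human-readable names for the EI_ETYPE_* values shown in the listing. */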
static const char *error_type_string(int etype)
{
	switch (etype) {
	case EI_ETYPE_NULL:
		return "NULL";
	case EI_ETYPE_ERRNO:
		return "ERRNO";
	case EI_ETYPE_ERRNO_NULL:
		return "ERRNO_NULL";
	case EI_ETYPE_TRUE:
		return "TRUE";
	default:
		return "(unknown)";
	}
}

static int ei_seq_show(struct seq_file *m, void *v)
{
	struct ei_entry *ent = list_entry(v, struct ei_entry, list);

	seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
		   error_type_string(ent->etype));
	return 0;
}

static const struct seq_operations ei_sops = {
	.start = ei_seq_start,
	.next  = ei_seq_next,
	.stop  = ei_seq_stop,
	.show  = ei_seq_show,
};

DEFINE_SEQ_ATTRIBUTE(ei);

static int __init ei_debugfs_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("error_injection", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}
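
/*
 * With debugfs mounted (conventionally at /sys/kernel/debug), the whitelist
 * can be read back as "symbol<TAB>type" lines; the symbols below are
 * illustrative only:
 *
 *	# cat /sys/kernel/debug/error_injection/list
 *	some_func	ERRNO
 *	another_func	NULL
 */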

static int __init init_error_injection(void)
{
	populate_kernel_ei_list();

	if (!module_ei_init())
		ei_debugfs_init();

	return 0;
}
late_initcall(init_error_injection);