/* arch/x86/kernel/module.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel module help for x86.
 *  Copyright (C) 2001 Rusty Russell.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
  33. #ifdef CONFIG_RANDOMIZE_BASE
  34. static unsigned long module_load_offset;
  35. /* Mutex protects the module_load_offset. */
  36. static DEFINE_MUTEX(module_kaslr_mutex);
  37. static unsigned long int get_module_load_offset(void)
  38. {
  39. if (kaslr_enabled()) {
  40. mutex_lock(&module_kaslr_mutex);
  41. /*
  42. * Calculate the module_load_offset the first time this
  43. * code is called. Once calculated it stays the same until
  44. * reboot.
  45. */
  46. if (module_load_offset == 0)
  47. module_load_offset =
  48. (prandom_u32_max(1024) + 1) * PAGE_SIZE;
  49. mutex_unlock(&module_kaslr_mutex);
  50. }
  51. return module_load_offset;
  52. }
  53. #else
  54. static unsigned long int get_module_load_offset(void)
  55. {
  56. return 0;
  57. }
  58. #endif
  59. void *module_alloc(unsigned long size)
  60. {
  61. gfp_t gfp_mask = GFP_KERNEL;
  62. void *p;
  63. if (PAGE_ALIGN(size) > MODULES_LEN)
  64. return NULL;
  65. p = __vmalloc_node_range(size, MODULE_ALIGN,
  66. MODULES_VADDR + get_module_load_offset(),
  67. MODULES_END, gfp_mask,
  68. PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
  69. __builtin_return_address(0));
  70. if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
  71. vfree(p);
  72. return NULL;
  73. }
  74. return p;
  75. }
  76. #ifdef CONFIG_X86_32
  77. int apply_relocate(Elf32_Shdr *sechdrs,
  78. const char *strtab,
  79. unsigned int symindex,
  80. unsigned int relsec,
  81. struct module *me)
  82. {
  83. unsigned int i;
  84. Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
  85. Elf32_Sym *sym;
  86. uint32_t *location;
  87. DEBUGP("Applying relocate section %u to %u\n",
  88. relsec, sechdrs[relsec].sh_info);
  89. for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
  90. /* This is where to make the change */
  91. location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
  92. + rel[i].r_offset;
  93. /* This is the symbol it is referring to. Note that all
  94. undefined symbols have been resolved. */
  95. sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
  96. + ELF32_R_SYM(rel[i].r_info);
  97. switch (ELF32_R_TYPE(rel[i].r_info)) {
  98. case R_386_32:
  99. /* We add the value into the location given */
  100. *location += sym->st_value;
  101. break;
  102. case R_386_PC32:
  103. case R_386_PLT32:
  104. /* Add the value, subtract its position */
  105. *location += sym->st_value - (uint32_t)location;
  106. break;
  107. default:
  108. pr_err("%s: Unknown relocation: %u\n",
  109. me->name, ELF32_R_TYPE(rel[i].r_info));
  110. return -ENOEXEC;
  111. }
  112. }
  113. return 0;
  114. }
  115. #else /*X86_64*/
  116. static int __apply_relocate_add(Elf64_Shdr *sechdrs,
  117. const char *strtab,
  118. unsigned int symindex,
  119. unsigned int relsec,
  120. struct module *me,
  121. void *(*write)(void *dest, const void *src, size_t len))
  122. {
  123. unsigned int i;
  124. Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
  125. Elf64_Sym *sym;
  126. void *loc;
  127. u64 val;
  128. DEBUGP("Applying relocate section %u to %u\n",
  129. relsec, sechdrs[relsec].sh_info);
  130. for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
  131. /* This is where to make the change */
  132. loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
  133. + rel[i].r_offset;
  134. /* This is the symbol it is referring to. Note that all
  135. undefined symbols have been resolved. */
  136. sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
  137. + ELF64_R_SYM(rel[i].r_info);
  138. DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
  139. (int)ELF64_R_TYPE(rel[i].r_info),
  140. sym->st_value, rel[i].r_addend, (u64)loc);
  141. val = sym->st_value + rel[i].r_addend;
  142. switch (ELF64_R_TYPE(rel[i].r_info)) {
  143. case R_X86_64_NONE:
  144. break;
  145. case R_X86_64_64:
  146. if (*(u64 *)loc != 0)
  147. goto invalid_relocation;
  148. write(loc, &val, 8);
  149. break;
  150. case R_X86_64_32:
  151. if (*(u32 *)loc != 0)
  152. goto invalid_relocation;
  153. write(loc, &val, 4);
  154. if (val != *(u32 *)loc)
  155. goto overflow;
  156. break;
  157. case R_X86_64_32S:
  158. if (*(s32 *)loc != 0)
  159. goto invalid_relocation;
  160. write(loc, &val, 4);
  161. if ((s64)val != *(s32 *)loc)
  162. goto overflow;
  163. break;
  164. case R_X86_64_PC32:
  165. case R_X86_64_PLT32:
  166. if (*(u32 *)loc != 0)
  167. goto invalid_relocation;
  168. val -= (u64)loc;
  169. write(loc, &val, 4);
  170. #if 0
  171. if ((s64)val != *(s32 *)loc)
  172. goto overflow;
  173. #endif
  174. break;
  175. case R_X86_64_PC64:
  176. if (*(u64 *)loc != 0)
  177. goto invalid_relocation;
  178. val -= (u64)loc;
  179. write(loc, &val, 8);
  180. break;
  181. default:
  182. pr_err("%s: Unknown rela relocation: %llu\n",
  183. me->name, ELF64_R_TYPE(rel[i].r_info));
  184. return -ENOEXEC;
  185. }
  186. }
  187. return 0;
  188. invalid_relocation:
  189. pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
  190. (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
  191. return -ENOEXEC;
  192. overflow:
  193. pr_err("overflow in relocation type %d val %Lx\n",
  194. (int)ELF64_R_TYPE(rel[i].r_info), val);
  195. pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
  196. me->name);
  197. return -ENOEXEC;
  198. }
  199. int apply_relocate_add(Elf64_Shdr *sechdrs,
  200. const char *strtab,
  201. unsigned int symindex,
  202. unsigned int relsec,
  203. struct module *me)
  204. {
  205. int ret;
  206. bool early = me->state == MODULE_STATE_UNFORMED;
  207. void *(*write)(void *, const void *, size_t) = memcpy;
  208. if (!early) {
  209. write = text_poke;
  210. mutex_lock(&text_mutex);
  211. }
  212. ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
  213. write);
  214. if (!early) {
  215. text_poke_sync();
  216. mutex_unlock(&text_mutex);
  217. }
  218. return ret;
  219. }
  220. #endif
  221. int module_finalize(const Elf_Ehdr *hdr,
  222. const Elf_Shdr *sechdrs,
  223. struct module *me)
  224. {
  225. const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
  226. *para = NULL, *orc = NULL, *orc_ip = NULL,
  227. *retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
  228. char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
  229. for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
  230. if (!strcmp(".text", secstrings + s->sh_name))
  231. text = s;
  232. if (!strcmp(".altinstructions", secstrings + s->sh_name))
  233. alt = s;
  234. if (!strcmp(".smp_locks", secstrings + s->sh_name))
  235. locks = s;
  236. if (!strcmp(".parainstructions", secstrings + s->sh_name))
  237. para = s;
  238. if (!strcmp(".orc_unwind", secstrings + s->sh_name))
  239. orc = s;
  240. if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
  241. orc_ip = s;
  242. if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
  243. retpolines = s;
  244. if (!strcmp(".return_sites", secstrings + s->sh_name))
  245. returns = s;
  246. if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
  247. ibt_endbr = s;
  248. }
  249. /*
  250. * See alternative_instructions() for the ordering rules between the
  251. * various patching types.
  252. */
  253. if (para) {
  254. void *pseg = (void *)para->sh_addr;
  255. apply_paravirt(pseg, pseg + para->sh_size);
  256. }
  257. if (retpolines) {
  258. void *rseg = (void *)retpolines->sh_addr;
  259. apply_retpolines(rseg, rseg + retpolines->sh_size);
  260. }
  261. if (returns) {
  262. void *rseg = (void *)returns->sh_addr;
  263. apply_returns(rseg, rseg + returns->sh_size);
  264. }
  265. if (alt) {
  266. /* patch .altinstructions */
  267. void *aseg = (void *)alt->sh_addr;
  268. apply_alternatives(aseg, aseg + alt->sh_size);
  269. }
  270. if (ibt_endbr) {
  271. void *iseg = (void *)ibt_endbr->sh_addr;
  272. apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
  273. }
  274. if (locks && text) {
  275. void *lseg = (void *)locks->sh_addr;
  276. void *tseg = (void *)text->sh_addr;
  277. alternatives_smp_module_add(me, me->name,
  278. lseg, lseg + locks->sh_size,
  279. tseg, tseg + text->sh_size);
  280. }
  281. if (orc && orc_ip)
  282. unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
  283. (void *)orc->sh_addr, orc->sh_size);
  284. return 0;
  285. }
  286. void module_arch_cleanup(struct module *mod)
  287. {
  288. alternatives_smp_module_del(mod);
  289. }