module.c

// SPDX-License-Identifier: GPL-2.0+
/*
 *  Kernel module help for s390.
 *
 *  S390 version
 *    Copyright IBM Corp. 2002, 2003
 *    Author(s): Arnd Bergmann ([email protected])
 *		 Martin Schwidefsky ([email protected])
 *
 *  based on i386 version
 *    Copyright (C) 2001 Rusty Russell.
 */

#include <linux/module.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
#include <asm/ftrace.lds.h>
#include <asm/set_memory.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif
#define PLT_ENTRY_SIZE 22

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;
	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
				 gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}
	return p;
}
#ifdef CONFIG_FUNCTION_TRACER
void module_arch_cleanup(struct module *mod)
{
	module_memfree(mod->arch.trampolines_start);
}
#endif

void module_arch_freeing_init(struct module *mod)
{
	if (is_livepatch_module(mod) &&
	    mod->state == MODULE_STATE_LIVE)
		return;

	vfree(mod->arch.syminfo);
	mod->arch.syminfo = NULL;
}
static void check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset. */
	case R_390_GOT16:	/* 16 bit GOT offset. */
	case R_390_GOT20:	/* 20 bit GOT offset. */
	case R_390_GOT32:	/* 32 bit GOT offset. */
	case R_390_GOT64:	/* 64 bit GOT offset. */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot. */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot. */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot. */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot. */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot. */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32:	/* 32 bit PC relative PLT address. */
	case R_390_PLT64:	/* 64 bit PC relative PLT address. */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}
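
/*
 * Illustrative example (not part of the original source): check_rela()
 * reserves at most one GOT slot and one PLT entry per symbol. A module
 * whose relocations reference two distinct undefined symbols through
 * both GOT- and PLT-type relocations therefore ends up with
 *
 *	got_size = 2 * sizeof(void *)  = 16 bytes
 *	plt_size = 2 * PLT_ENTRY_SIZE  = 44 bytes
 *
 * Repeated relocations against the same symbol reuse the offsets cached
 * in mod_arch_syminfo and do not grow the tables any further.
 */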
/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
					      me->arch.nsyms));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	me->core_layout.size = ALIGN(me->core_layout.size, 4);
	me->arch.got_offset = me->core_layout.size;
	me->core_layout.size += me->arch.got_size;
	me->arch.plt_offset = me->core_layout.size;
	if (me->arch.plt_size) {
		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
			me->arch.plt_size += PLT_ENTRY_SIZE;
		me->core_layout.size += me->arch.plt_size;
	}
	return 0;
}
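
/*
 * Sketch of the resulting core layout (derived from the code above, not
 * part of the original source):
 *
 *	core_layout.base
 *	 |-- module text/data (previous core_layout.size, aligned to 4)
 *	 |-- GOT at arch.got_offset, arch.got_size bytes
 *	 |-- PLT at arch.plt_offset, arch.plt_size bytes
 *
 * With CONFIG_EXPOLINE enabled and expolines active (!nospec_disable),
 * one extra PLT_ENTRY_SIZE slot at the end of the PLT is reserved for
 * the shared __jump_r1 expoline thunk filled in by module_finalize().
 */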
static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
			   int sign, int bits, int shift,
			   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned long umax;
	long min, max;
	void *dest = (void *)loc;

	if (val & ((1UL << shift) - 1))
		return -ENOEXEC;
	if (sign) {
		val = (Elf_Addr)(((long) val) >> shift);
		min = -(1L << (bits - 1));
		max = (1L << (bits - 1)) - 1;
		if ((long) val < min || (long) val > max)
			return -ENOEXEC;
	} else {
		val >>= shift;
		umax = ((1UL << (bits - 1)) << 1) - 1;
		if ((unsigned long) val > umax)
			return -ENOEXEC;
	}

	if (bits == 8) {
		unsigned char tmp = val;
		write(dest, &tmp, 1);
	} else if (bits == 12) {
		unsigned short tmp = (val & 0xfff) |
			(*(unsigned short *) loc & 0xf000);
		write(dest, &tmp, 2);
	} else if (bits == 16) {
		unsigned short tmp = val;
		write(dest, &tmp, 2);
	} else if (bits == 20) {
		unsigned int tmp = (val & 0xfff) << 16 |
			(val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
		write(dest, &tmp, 4);
	} else if (bits == 32) {
		unsigned int tmp = val;
		write(dest, &tmp, 4);
	} else if (bits == 64) {
		unsigned long tmp = val;
		write(dest, &tmp, 8);
	}
	return 0;
}
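
/*
 * Worked example (illustrative, not part of the original source): the
 * 20 bit case above targets the long-displacement instruction formats,
 * where the displacement is split into a 12 bit DL and an 8 bit DH
 * field. For val = 0x12345 the word written back at loc carries
 * DL = 0x345 in bits 16-27 and DH = 0x12 in bits 8-15, while the
 * 0xf00000ff mask preserves the base-register nibble and the trailing
 * opcode byte already present at loc.
 */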
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
		      const char *strtab, struct module *me,
		      void *(*write)(void *dest, const void *src, size_t len))
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;
	int rc = -ENOEXEC;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to.  Note that all
	   undefined symbols have been resolved. */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_NONE:	/* No relocation. */
		rc = 0;
		break;
	case R_390_8:		/* Direct 8 bit. */
	case R_390_12:		/* Direct 12 bit. */
	case R_390_16:		/* Direct 16 bit. */
	case R_390_20:		/* Direct 20 bit. */
	case R_390_32:		/* Direct 32 bit. */
	case R_390_64:		/* Direct 64 bit. */
		val += rela->r_addend;
		if (r_type == R_390_8)
			rc = apply_rela_bits(loc, val, 0, 8, 0, write);
		else if (r_type == R_390_12)
			rc = apply_rela_bits(loc, val, 0, 12, 0, write);
		else if (r_type == R_390_16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_20)
			rc = apply_rela_bits(loc, val, 1, 20, 0, write);
		else if (r_type == R_390_32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_PC16:	/* PC relative 16 bit. */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1. */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1. */
	case R_390_PC32:	/* PC relative 32 bit. */
	case R_390_PC64:	/* PC relative 64 bit. */
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			rc = apply_rela_bits(loc, val, 1, 16, 0, write);
		else if (r_type == R_390_PC16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1, write);
		else if (r_type == R_390_PC32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		else if (r_type == R_390_PC32)
			rc = apply_rela_bits(loc, val, 1, 32, 0, write);
		else if (r_type == R_390_PC64)
			rc = apply_rela_bits(loc, val, 1, 64, 0, write);
		break;
	case R_390_GOT12:	/* 12 bit GOT offset. */
	case R_390_GOT16:	/* 16 bit GOT offset. */
	case R_390_GOT20:	/* 20 bit GOT offset. */
	case R_390_GOT32:	/* 32 bit GOT offset. */
	case R_390_GOT64:	/* 64 bit GOT offset. */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot. */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot. */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot. */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot. */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot. */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_initialized == 0) {
			Elf_Addr *gotent = me->core_layout.base +
					   me->arch.got_offset +
					   info->got_offset;

			write(gotent, &val, sizeof(*gotent));
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			rc = apply_rela_bits(loc, val, 0, 12, 0, write);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			rc = apply_rela_bits(loc, val, 1, 20, 0, write);
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT) {
			val += (Elf_Addr) me->core_layout.base - loc;
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32:	/* 32 bit PC relative PLT address. */
	case R_390_PLT64:	/* 64 bit PC relative PLT address. */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_initialized == 0) {
			unsigned char insn[PLT_ENTRY_SIZE];
			char *plt_base;
			char *ip;

			plt_base = me->core_layout.base + me->arch.plt_offset;
			ip = plt_base + info->plt_offset;
			*(int *)insn = 0x0d10e310;	/* basr 1,0 */
			*(int *)&insn[4] = 0x100c0004;	/* lg 1,12(1) */
			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
				char *jump_r1;

				jump_r1 = plt_base + me->arch.plt_size -
					PLT_ENTRY_SIZE;
				/* brcl 0xf,__jump_r1 */
				*(short *)&insn[8] = 0xc0f4;
				*(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
			} else {
				*(int *)&insn[8] = 0x07f10000;	/* br %r1 */
			}
			*(long *)&insn[14] = val;

			write(ip, insn, sizeof(insn));
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32 ||
		    r_type == R_390_PLTOFF64)
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else {
			if (!((r_type == R_390_PLT16DBL &&
			       val - loc + 0xffffUL < 0x1ffffeUL) ||
			      (r_type == R_390_PLT32DBL &&
			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
				val = (Elf_Addr) me->core_layout.base +
					me->arch.plt_offset +
					info->plt_offset;
			val += rela->r_addend - loc;
		}
		if (r_type == R_390_PLT16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1, write);
		else if (r_type == R_390_PLTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_PLT32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_GOTOFF16:	/* 16 bit offset to GOT. */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT. */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT. */
		val = val + rela->r_addend -
			((Elf_Addr) me->core_layout.base + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_GOTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_GOTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
		val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
			rela->r_addend - loc;
		if (r_type == R_390_GOTPC)
			rc = apply_rela_bits(loc, val, 1, 32, 0, write);
		else if (r_type == R_390_GOTPCDBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry. */
	case R_390_JMP_SLOT:	/* Create PLT entry. */
	case R_390_RELATIVE:	/* Adjust by program base. */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		return -ENOEXEC;
	default:
		printk(KERN_ERR "module %s: unknown relocation: %u\n",
		       me->name, r_type);
		return -ENOEXEC;
	}
	if (rc) {
		printk(KERN_ERR "module %s: relocation error for symbol %s "
		       "(r_type %i, value 0x%lx)\n",
		       me->name, strtab + symtab[r_sym].st_name,
		       r_type, (unsigned long) val);
		return rc;
	}
	return 0;
}
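
/*
 * Layout of the 22 byte PLT entry generated in the R_390_PLT* case above
 * (sketch, not part of the original source; byte offsets within insn[]):
 *
 *	 0- 1	basr  %r1,0		r1 := address of the following lg
 *	 2- 7	lg    %r1,12(%r1)	load the target from bytes 14-21
 *	 8-13	brcl  0xf,__jump_r1	(expoline) or br %r1
 *	14-21	64 bit target address (val)
 */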
static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
				unsigned int symindex, unsigned int relsec,
				struct module *me,
				void *(*write)(void *dest, const void *src, size_t len))
{
	Elf_Addr base;
	Elf_Sym *symtab;
	Elf_Rela *rela;
	unsigned long i, n;
	int rc;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
	symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
	rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
	n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);

	for (i = 0; i < n; i++, rela++) {
		rc = apply_rela(rela, base, symtab, strtab, me, write);
		if (rc)
			return rc;
	}
	return 0;
}
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early)
		write = s390_kernel_write;

	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				    write);
}
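
/*
 * Note (explanatory, not part of the original source): while the module
 * is still MODULE_STATE_UNFORMED its sections are plain writable vmalloc
 * memory, so memcpy() is sufficient. Relocations applied later, e.g. by
 * livepatch, may hit memory that has already been write-protected, which
 * is why the non-early path goes through s390_kernel_write().
 */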
#ifdef CONFIG_FUNCTION_TRACER
static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
						    const Elf_Shdr *s)
{
	char *start, *end;
	int numpages;
	size_t size;

	size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
	numpages = DIV_ROUND_UP(size, PAGE_SIZE);
	start = module_alloc(numpages * PAGE_SIZE);
	if (!start)
		return -ENOMEM;
	set_memory_ro((unsigned long)start, numpages);
	end = start + size;

	me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
	me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
	me->arch.next_trampoline = me->arch.trampolines_start;

	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
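
/*
 * Note (assumption, not part of the original source): the trampoline
 * area above is made read-only right after allocation; the individual
 * trampolines are presumably written later through the kernel's
 * text-patching helpers rather than by direct stores.
 */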
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	char *secstrings, *secname;
	void *aseg;
#ifdef CONFIG_FUNCTION_TRACER
	int ret;
#endif

	if (IS_ENABLED(CONFIG_EXPOLINE) &&
	    !nospec_disable && me->arch.plt_size) {
		unsigned int *ij;

		/* Fill the reserved last PLT slot with the shared
		   __jump_r1 expoline thunk used by all PLT entries. */
		ij = me->core_layout.base + me->arch.plt_offset +
			me->arch.plt_size - PLT_ENTRY_SIZE;
		ij[0] = 0xc6000000;	/* exrl %r0,.+10 */
		ij[1] = 0x0005a7f4;	/* j . */
		ij[2] = 0x000007f1;	/* br %r1 */
	}

	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		aseg = (void *) s->sh_addr;
		secname = secstrings + s->sh_name;

		if (!strcmp(".altinstructions", secname))
			/* patch .altinstructions */
			apply_alternatives(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_EXPOLINE) &&
		    (str_has_prefix(secname, ".s390_indirect")))
			nospec_revert(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_EXPOLINE) &&
		    (str_has_prefix(secname, ".s390_return")))
			nospec_revert(aseg, aseg + s->sh_size);

#ifdef CONFIG_FUNCTION_TRACER
		if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
			ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
			if (ret < 0)
				return ret;
		}
#endif /* CONFIG_FUNCTION_TRACER */
	}

	return 0;
}