module.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel dynamically loadable module help for PARISC.
 *
 * The best reference for this stuff is probably the Processor-
 * Specific ELF Supplement for PA-RISC:
 *     https://parisc.wiki.kernel.org/index.php/File:Elf-pa-hp.pdf
 *
 * Linux/PA-RISC Project
 * Copyright (C) 2003 Randolph Chung <tausq at debian . org>
 * Copyright (C) 2008 Helge Deller <[email protected]>
 *
 * Notes:
 * - PLT stub handling
 *   On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
 *   ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
 *   fail to reach their PLT stub if we only create one big stub array for
 *   all sections at the beginning of the core or init section.
 *   Instead we now insert individual PLT stub entries directly in front
 *   of the code sections where the stubs are actually called.
 *   This reduces the distance between the PCREL location and the stub entry
 *   so that the relocations can be fulfilled.
 *   While calculating the final layout of the kernel module in memory, the
 *   kernel module loader calls arch_mod_section_prepend() to reserve the
 *   required amount of memory in front of each individual section.
 *
 * - SEGREL32 handling
 *   We are not doing SEGREL32 handling correctly. According to the ABI, we
 *   should do a value offset, like this:
 *		if (in_init(me, (void *)val))
 *			val -= (uint32_t)me->init_layout.base;
 *		else
 *			val -= (uint32_t)me->core_layout.base;
 *   However, SEGREL32 is used only for PARISC unwind entries, and we want
 *   those entries to have an absolute address, and not just an offset.
 *
 *   The unwind table mechanism has the ability to specify an offset for
 *   the unwind table; however, because we split off the init functions into
 *   a different piece of memory, it is not possible to do this using a
 *   single offset. Instead, we use the above hack for now.
 */
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/unwind.h>
#include <asm/sections.h>
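
/* RELOC_REACHABLE() tests whether an (already divided-down) branch offset
 * still fits the signed immediate field of the instruction; CHECK_RELOC()
 * turns an unreachable target into a clean load failure. */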
#define RELOC_REACHABLE(val, bits) \
	(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
	     ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ?	\
	0 : 1)

#define CHECK_RELOC(val, bits) \
	if (!RELOC_REACHABLE(val, bits)) { \
		printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
		me->name, strtab + sym->st_name, (unsigned long)val, bits); \
		return -ENOEXEC;			\
	}
/* Maximum number of GOT entries. We use a long displacement ldd from
 * the bottom of the table, which has a maximum signed displacement of
 * 0x3fff; however, since we're only going forward, this becomes
 * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
 * at most 1023 entries.
 * To overcome this 14bit displacement with some kernel modules, we'll
 * use instead the unusual 16bit displacement method (see reassemble_16a)
 * which gives us a maximum positive displacement of 0x7fff, and as such
 * allows us to allocate up to 4095 GOT entries. */
#define MAX_GOTS	4095
/* three functions to determine where in the module core
 * or init pieces the location is */
static inline int in_init(struct module *me, void *loc)
{
	return (loc >= me->init_layout.base &&
		loc <= (me->init_layout.base + me->init_layout.size));
}

static inline int in_core(struct module *me, void *loc)
{
	return (loc >= me->core_layout.base &&
		loc <= (me->core_layout.base + me->core_layout.size));
}

static inline int in_local(struct module *me, void *loc)
{
	return in_init(me, loc) || in_core(me, loc);
}
#ifndef CONFIG_64BIT
struct got_entry {
	Elf32_Addr addr;
};

struct stub_entry {
	Elf32_Word insns[2]; /* each stub entry has two insns */
};
#else
struct got_entry {
	Elf64_Addr addr;
};

struct stub_entry {
	Elf64_Word insns[4]; /* each stub entry has four insns */
};
#endif
/* Field selection types defined by hppa */
#define rnd(x)			(((x)+0x1000)&~0x1fff)
/* fsel: full 32 bits */
#define fsel(v,a)		((v)+(a))
/* lsel: select left 21 bits */
#define lsel(v,a)		(((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a)		(((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a)		(((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a)		((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))

#define mask(x,sz)		((x) & ~((1<<(sz))-1))
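
/* A sketch of the usual PA-RISC long-immediate pairing these selectors
 * serve: a 32-bit address V with addend A is materialized as
 *	ldil	L'V+A,%r1	; lrsel -> upper 21 bits
 *	ldo	R'V+A(%r1),%r1	; rrsel -> lower 11 bits
 * The lrsel/rrsel variants round the addend to the nearest 8k so the
 * two halves still sum to exactly V+A. */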
/* The reassemble_* functions prepare an immediate value for
   insertion into an opcode. pa-risc uses all sorts of weird bitfields
   in the instruction to hold the value. */
static inline int sign_unext(int x, int len)
{
	int len_ones;

	len_ones = (1 << len) - 1;
	return x & len_ones;
}

static inline int low_sign_unext(int x, int len)
{
	int sign, temp;

	sign = (x >> (len-1)) & 1;
	temp = sign_unext(x, len-1);
	return (temp << 1) | sign;
}

static inline int reassemble_14(int as14)
{
	return (((as14 & 0x1fff) << 1) |
		((as14 & 0x2000) >> 13));
}

static inline int reassemble_16a(int as16)
{
	int s, t;

	/* Unusual 16-bit encoding, for wide mode only.  */
	t = (as16 << 1) & 0xffff;
	s = (as16 & 0x8000);
	return (t ^ s ^ (s >> 1)) | (s >> 15);
}

static inline int reassemble_17(int as17)
{
	return (((as17 & 0x10000) >> 16) |
		((as17 & 0x0f800) << 5) |
		((as17 & 0x00400) >> 8) |
		((as17 & 0x003ff) << 3));
}

static inline int reassemble_21(int as21)
{
	return (((as21 & 0x100000) >> 20) |
		((as21 & 0x0ffe00) >> 8) |
		((as21 & 0x000180) << 7) |
		((as21 & 0x00007c) << 14) |
		((as21 & 0x000003) << 12));
}

static inline int reassemble_22(int as22)
{
	return (((as22 & 0x200000) >> 21) |
		((as22 & 0x1f0000) << 5) |
		((as22 & 0x00f800) << 5) |
		((as22 & 0x000400) >> 8) |
		((as22 & 0x0003ff) << 3));
}
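
/* A rough map of the instruction formats served by the helpers above
 * (per the PA-RISC architecture manuals):
 *	reassemble_14  - 14-bit displacement of loads/stores (ldo, ldd, ...)
 *	reassemble_16a - unusual 16-bit wide-mode displacement (ldd/std)
 *	reassemble_17  - 17-bit branch displacement (bl, be)
 *	reassemble_21  - 21-bit immediate of ldil/addil
 *	reassemble_22  - 22-bit branch displacement (PA2.0 b,l)
 */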
void *module_alloc(unsigned long size)
{
	/* using RWX means less protection for modules, but it's
	 * easier than trying to map the text, data, init_text and
	 * init_data correctly */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL,
				    PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#ifndef CONFIG_64BIT
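/* 32-bit modules use neither a GOT nor function descriptors, so only
 * branch stubs ever need to be counted. */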
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
	return 0;
}

static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
	return 0;
}

static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF32_R_TYPE(rela->r_info)) {
		case R_PARISC_PCREL17F:
		case R_PARISC_PCREL22F:
			cnt++;
		}
	}

	return cnt;
}
#else
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_PARISC_LTOFF21L:
		case R_PARISC_LTOFF14R:
		case R_PARISC_PCREL22F:
			cnt++;
		}
	}

	return cnt;
}

static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_PARISC_FPTR64:
			cnt++;
		}
	}

	return cnt;
}

static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_PARISC_PCREL22F:
			cnt++;
		}
	}

	return cnt;
}
#endif
void module_arch_freeing_init(struct module *mod)
{
	kfree(mod->arch.section);
	mod->arch.section = NULL;
}
/* Additional bytes needed in front of individual sections */
unsigned int arch_mod_section_prepend(struct module *mod,
				      unsigned int section)
{
	/* size needed for all stubs of this section (including
	 * one additional for correct alignment of the stubs) */
	return (mod->arch.section[section].stub_entries + 1)
		* sizeof(struct stub_entry);
}
#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
			      CONST Elf_Shdr *sechdrs,
			      CONST char *secstrings,
			      struct module *me)
{
	unsigned long gots = 0, fdescs = 0, len;
	unsigned int i;

	len = hdr->e_shnum * sizeof(me->arch.section[0]);
	me->arch.section = kzalloc(len, GFP_KERNEL);
	if (!me->arch.section)
		return -ENOMEM;

	for (i = 1; i < hdr->e_shnum; i++) {
		const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
		unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
		unsigned int count, s;

		if (strncmp(secstrings + sechdrs[i].sh_name,
			    ".PARISC.unwind", 14) == 0)
			me->arch.unwind_section = i;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* some of these are not relevant for 32-bit/64-bit
		 * we leave them here to make the code common. the
		 * compiler will do its thing and optimize out the
		 * stuff we don't need
		 */
		gots += count_gots(rels, nrels);
		fdescs += count_fdescs(rels, nrels);

		/* XXX: By sorting the relocs and finding duplicate entries
		 * we could reduce the number of necessary stubs and save
		 * some memory. */
		count = count_stubs(rels, nrels);
		if (!count)
			continue;

		/* so we need relocation stubs. reserve necessary memory. */
		/* sh_info gives the section for which we need to add stubs. */
		s = sechdrs[i].sh_info;

		/* each code section should only have one relocation section */
		WARN_ON(me->arch.section[s].stub_entries);

		/* store number of stubs we need for this section */
		me->arch.section[s].stub_entries += count;
	}

	/* align things a bit */
	me->core_layout.size = ALIGN(me->core_layout.size, 16);
	me->arch.got_offset = me->core_layout.size;
	me->core_layout.size += gots * sizeof(struct got_entry);

	me->core_layout.size = ALIGN(me->core_layout.size, 16);
	me->arch.fdesc_offset = me->core_layout.size;
	me->core_layout.size += fdescs * sizeof(Elf_Fdesc);

	me->arch.got_max = gots;
	me->arch.fdesc_max = fdescs;

	return 0;
}
#ifdef CONFIG_64BIT
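/* Look up value+addend in the module's GOT; allocate a fresh slot on a
 * miss. The linear scan keeps entries unique, and the returned byte
 * offset becomes the ldd displacement in ELF_STUB_GOT stubs. */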
static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
{
	unsigned int i;
	struct got_entry *got;

	value += addend;

	BUG_ON(value == 0);

	got = me->core_layout.base + me->arch.got_offset;
	for (i = 0; got[i].addr; i++)
		if (got[i].addr == value)
			goto out;

	BUG_ON(++me->arch.got_count > me->arch.got_max);

	got[i].addr = value;
 out:
	pr_debug("GOT ENTRY %d[%lx] val %lx\n", i, i*sizeof(struct got_entry),
	       value);
	return i * sizeof(struct got_entry);
}
#endif /* CONFIG_64BIT */
#ifdef CONFIG_64BIT
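/* Return (creating it on demand) the OPD entry for a function address.
 * A 64-bit function descriptor pairs the entry point with the module's
 * GP so that indirect calls can reload the data pointer. */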
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
	Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;

	if (!value) {
		printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
		return 0;
	}

	/* Look for existing fdesc entry. */
	while (fdesc->addr) {
		if (fdesc->addr == value)
			return (Elf_Addr)fdesc;
		fdesc++;
	}

	BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);

	/* Create new one */
	fdesc->addr = value;
	fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
	return (Elf_Addr)fdesc;
}
#endif /* CONFIG_64BIT */
enum elf_stub_type {
	ELF_STUB_GOT,		/* indirect call through the module's GOT */
	ELF_STUB_MILLI,		/* call to a millicode routine ($$-prefixed) */
	ELF_STUB_DIRECT,	/* direct jump between sections of one module */
};
static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
	enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
{
	struct stub_entry *stub;
	int __maybe_unused d;

	/* initialize stub_offset to point in front of the section */
	if (!me->arch.section[targetsec].stub_offset) {
		loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
				sizeof(struct stub_entry);
		/* get correct alignment for the stubs */
		loc0 = ALIGN(loc0, sizeof(struct stub_entry));
		me->arch.section[targetsec].stub_offset = loc0;
	}

	/* get address of stub entry */
	stub = (void *) me->arch.section[targetsec].stub_offset;
	me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);

	/* do not write outside available stub area */
	BUG_ON(0 == me->arch.section[targetsec].stub_entries--);

#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
 * 	ldil L'XXX,%r1
 * 	be,n R'XXX(%sr4,%r1)
 */
	//value = *(unsigned long *)((value + addend) & ~3); /* why? */

	stub->insns[0] = 0x20200000;	/* ldil L'XXX,%r1	*/
	stub->insns[1] = 0xe0202002;	/* be,n R'XXX(%sr4,%r1)	*/

	stub->insns[0] |= reassemble_21(lrsel(value, addend));
	stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);

#else
/* for 64-bit we have three kinds of stubs:
 * for normal function calls:
 * 	ldd 0(%dp),%dp
 * 	ldd 10(%dp), %r1
 * 	bve (%r1)
 * 	ldd 18(%dp), %dp
 *
 * for millicode:
 * 	ldil 0, %r1
 * 	ldo 0(%r1), %r1
 * 	ldd 10(%r1), %r1
 * 	bve,n (%r1)
 *
 * for direct branches (jumps between different section of the
 * same module):
 *	ldil 0, %r1
 *	ldo 0(%r1), %r1
 *	bve,n (%r1)
 */
	switch (stub_type) {
	case ELF_STUB_GOT:
		d = get_got(me, value, addend);
		if (d <= 15) {
			/* Format 5 */
			stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
			stub->insns[0] |= low_sign_unext(d, 5) << 16;
		} else {
			/* Format 3 */
			stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
			stub->insns[0] |= reassemble_16a(d);
		}
		stub->insns[1] = 0x53610020;	/* ldd 10(%dp),%r1	*/
		stub->insns[2] = 0xe820d000;	/* bve (%r1)		*/
		stub->insns[3] = 0x537b0030;	/* ldd 18(%dp),%dp	*/
		break;
	case ELF_STUB_MILLI:
		stub->insns[0] = 0x20200000;	/* ldil 0,%r1		*/
		stub->insns[1] = 0x34210000;	/* ldo 0(%r1), %r1	*/
		stub->insns[2] = 0x50210020;	/* ldd 10(%r1),%r1	*/
		stub->insns[3] = 0xe820d002;	/* bve,n (%r1)		*/

		stub->insns[0] |= reassemble_21(lrsel(value, addend));
		stub->insns[1] |= reassemble_14(rrsel(value, addend));
		break;
	case ELF_STUB_DIRECT:
		stub->insns[0] = 0x20200000;	/* ldil 0,%r1		*/
		stub->insns[1] = 0x34210000;	/* ldo 0(%r1), %r1	*/
		stub->insns[2] = 0xe820d002;	/* bve,n (%r1)		*/

		stub->insns[0] |= reassemble_21(lrsel(value, addend));
		stub->insns[1] |= reassemble_14(rrsel(value, addend));
		break;
	}
#endif

	return (Elf_Addr)stub;
}
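
/* Note: stub slots are handed out strictly in relocation order; the
 * BUG_ON() in get_stub() fires if relocation processing requests more
 * stubs than count_stubs() reserved during section layout. */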
#ifndef CONFIG_64BIT
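/* Apply one RELA section to its target section: resolve each relocation
 * against the final symbol value and patch the instruction in place;
 * PC-relative branches that fall out of range are redirected through a
 * freshly built stub in front of the target section. */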
int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Word *loc;
	Elf32_Addr val;
	Elf32_Sword addend;
	Elf32_Addr dot;
	Elf_Addr loc0;
	unsigned int targetsec = sechdrs[relsec].sh_info;
	//unsigned long dp = (unsigned long)$global$;
	register unsigned long dp asm ("r27");	/* the global data pointer */

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       targetsec);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[targetsec].sh_addr
		      + rel[i].r_offset;
		/* This is the start of the target section */
		loc0 = sechdrs[targetsec].sh_addr;
		/* This is the symbol it is referring to */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);
		if (!sym->st_value) {
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       me->name, strtab + sym->st_name);
			return -ENOENT;
		}
		//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
		dot = (Elf32_Addr)loc & ~0x03;

		val = sym->st_value;
		addend = rel[i].r_addend;

#if 0
#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
		pr_debug("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
			strtab + sym->st_name,
			(uint32_t)loc, val, addend,
			r(R_PARISC_PLABEL32)
			r(R_PARISC_DIR32)
			r(R_PARISC_DIR21L)
			r(R_PARISC_DIR14R)
			r(R_PARISC_SEGREL32)
			r(R_PARISC_DPREL21L)
			r(R_PARISC_DPREL14R)
			r(R_PARISC_PCREL17F)
			r(R_PARISC_PCREL22F)
			"UNKNOWN");
#undef r
#endif

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_PARISC_PLABEL32:
			/* 32-bit function address */
			/* no function descriptors... */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_DIR32:
			/* direct 32-bit ref */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_DIR21L:
			/* left 21 bits of effective address */
			val = lrsel(val, addend);
			*loc = mask(*loc, 21) | reassemble_21(val);
			break;
		case R_PARISC_DIR14R:
			/* right 14 bits of effective address */
			val = rrsel(val, addend);
			*loc = mask(*loc, 14) | reassemble_14(val);
			break;
		case R_PARISC_SEGREL32:
			/* 32-bit segment relative address */
			/* See note about special handling of SEGREL32 at
			 * the beginning of this file.
			 */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_SECREL32:
			/* 32-bit section relative address. */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_DPREL21L:
			/* left 21 bit of relative address */
			val = lrsel(val - dp, addend);
			*loc = mask(*loc, 21) | reassemble_21(val);
			break;
		case R_PARISC_DPREL14R:
			/* right 14 bit of relative address */
			val = rrsel(val - dp, addend);
			*loc = mask(*loc, 14) | reassemble_14(val);
			break;
		case R_PARISC_PCREL17F:
			/* 17-bit PC relative address */
			/* calculate direct call offset */
			val += addend;
			val = (val - dot - 8)/4;
			if (!RELOC_REACHABLE(val, 17)) {
				/* direct distance too far, create
				 * stub entry instead */
				val = get_stub(me, sym->st_value, addend,
					ELF_STUB_DIRECT, loc0, targetsec);
				val = (val - dot - 8)/4;
				CHECK_RELOC(val, 17);
			}
			*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
			break;
		case R_PARISC_PCREL22F:
			/* 22-bit PC relative address; only defined for pa20 */
			/* calculate direct call offset */
			val += addend;
			val = (val - dot - 8)/4;
			if (!RELOC_REACHABLE(val, 22)) {
				/* direct distance too far, create
				 * stub entry instead */
				val = get_stub(me, sym->st_value, addend,
					ELF_STUB_DIRECT, loc0, targetsec);
				val = (val - dot - 8)/4;
				CHECK_RELOC(val, 22);
			}
			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
			break;
		case R_PARISC_PCREL32:
			/* 32-bit PC relative address */
			*loc = val - dot - 8 + addend;
			break;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}
#else
int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	Elf64_Word *loc;
	Elf64_Xword *loc64;
	Elf64_Addr val;
	Elf64_Sxword addend;
	Elf64_Addr dot;
	Elf_Addr loc0;
	unsigned int targetsec = sechdrs[relsec].sh_info;

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       targetsec);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[targetsec].sh_addr
		      + rel[i].r_offset;
		/* This is the start of the target section */
		loc0 = sechdrs[targetsec].sh_addr;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);
		if (!sym->st_value) {
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       me->name, strtab + sym->st_name);
			return -ENOENT;
		}
		//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
		dot = (Elf64_Addr)loc & ~0x03;
		loc64 = (Elf64_Xword *)loc;

		val = sym->st_value;
		addend = rel[i].r_addend;

#if 0
#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
		printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
			strtab + sym->st_name,
			loc, val, addend,
			r(R_PARISC_LTOFF14R)
			r(R_PARISC_LTOFF21L)
			r(R_PARISC_PCREL22F)
			r(R_PARISC_DIR64)
			r(R_PARISC_SEGREL32)
			r(R_PARISC_FPTR64)
			"UNKNOWN");
#undef r
#endif

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_PARISC_LTOFF21L:
			/* LT-relative; left 21 bits */
			val = get_got(me, val, addend);
			pr_debug("LTOFF21L Symbol %s loc %p val %llx\n",
			       strtab + sym->st_name,
			       loc, val);
			val = lrsel(val, 0);
			*loc = mask(*loc, 21) | reassemble_21(val);
			break;
		case R_PARISC_LTOFF14R:
			/* L(ltoff(val+addend)) */
			/* LT-relative; right 14 bits */
			val = get_got(me, val, addend);
			val = rrsel(val, 0);
			pr_debug("LTOFF14R Symbol %s loc %p val %llx\n",
			       strtab + sym->st_name,
			       loc, val);
			*loc = mask(*loc, 14) | reassemble_14(val);
			break;
		case R_PARISC_PCREL22F:
			/* PC-relative; 22 bits */
			pr_debug("PCREL22F Symbol %s loc %p val %llx\n",
			       strtab + sym->st_name,
			       loc, val);
			val += addend;
			/* can we reach it locally? */
			if (in_local(me, (void *)val)) {
				/* this is the case where the symbol is local
				 * to the module, but in a different section,
				 * so stub the jump in case it's more than 22
				 * bits away */
				val = (val - dot - 8)/4;
				if (!RELOC_REACHABLE(val, 22)) {
					/* direct distance too far, create
					 * stub entry instead */
					val = get_stub(me, sym->st_value,
						addend, ELF_STUB_DIRECT,
						loc0, targetsec);
				} else {
					/* Ok, we can reach it directly. */
					val = sym->st_value;
					val += addend;
				}
			} else {
				val = sym->st_value;
				if (strncmp(strtab + sym->st_name, "$$", 2)
				    == 0)
					val = get_stub(me, val, addend, ELF_STUB_MILLI,
						       loc0, targetsec);
				else
					val = get_stub(me, val, addend, ELF_STUB_GOT,
						       loc0, targetsec);
			}
			pr_debug("STUB FOR %s loc %px, val %llx+%llx at %llx\n",
			       strtab + sym->st_name, loc, sym->st_value,
			       addend, val);
			val = (val - dot - 8)/4;
			CHECK_RELOC(val, 22);
			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
			break;
		case R_PARISC_PCREL32:
			/* 32-bit PC relative address */
			*loc = val - dot - 8 + addend;
			break;
		case R_PARISC_PCREL64:
			/* 64-bit PC relative address */
			*loc64 = val - dot - 8 + addend;
			break;
		case R_PARISC_DIR64:
			/* 64-bit effective address */
			*loc64 = val + addend;
			break;
		case R_PARISC_SEGREL32:
			/* 32-bit segment relative address */
			/* See note about special handling of SEGREL32 at
			 * the beginning of this file.
			 */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_SECREL32:
			/* 32-bit section relative address. */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_FPTR64:
			/* 64-bit function address */
			if(in_local(me, (void *)(val + addend))) {
				*loc64 = get_fdesc(me, val+addend);
				pr_debug("FDESC for %s at %llx points to %llx\n",
				       strtab + sym->st_name, *loc64,
				       ((Elf_Fdesc *)*loc64)->addr);
			} else {
				/* if the symbol is not local to this
				 * module then val+addend is a pointer
				 * to the function descriptor */
				pr_debug("Non local FPTR64 Symbol %s loc %p val %llx\n",
				       strtab + sym->st_name,
				       loc, val);
				*loc64 = val + addend;
			}
			break;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#endif
static void
register_unwind_table(struct module *me,
		      const Elf_Shdr *sechdrs)
{
	unsigned char *table, *end;
	unsigned long gp;

	if (!me->arch.unwind_section)
		return;

	table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
	end = table + sechdrs[me->arch.unwind_section].sh_size;
	gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;

	pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
	       me->arch.unwind_section, table, end, gp);
	me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
}

static void
deregister_unwind_table(struct module *me)
{
	if (me->arch.unwind)
		unwind_table_remove(me->arch.unwind);
}
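
/* Late fixups, run once all sections sit at their final addresses:
 * register the unwind table, sanity-check GOT usage, strip the
 * compiler-generated ".L" local symbols, patch .altinstructions and,
 * if configured, relocate the ftrace callsite section. */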
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	int i;
	unsigned long nsyms;
	const char *strtab = NULL;
	const Elf_Shdr *s;
	char *secstrings;
	int symindex = -1;
	Elf_Sym *newptr, *oldptr;
	Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
	Elf_Fdesc *entry;
	u32 *addr;

	entry = (Elf_Fdesc *)me->init;
	printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
	       entry->gp, entry->addr);
	addr = (u32 *)entry->addr;
	printk("INSNS: %x %x %x %x\n",
	       addr[0], addr[1], addr[2], addr[3]);
	printk("got entries used %ld, gots max %ld\n"
	       "fdescs used %ld, fdescs max %ld\n",
	       me->arch.got_count, me->arch.got_max,
	       me->arch.fdesc_count, me->arch.fdesc_max);
#endif

	register_unwind_table(me, sechdrs);

	/* haven't filled in me->symtab yet, so have to find it
	 * ourselves */
	for (i = 1; i < hdr->e_shnum; i++) {
		if(sechdrs[i].sh_type == SHT_SYMTAB
		   && (sechdrs[i].sh_flags & SHF_ALLOC)) {
			int strindex = sechdrs[i].sh_link;

			symindex = i;
			/* FIXME: AWFUL HACK
			 * The cast is to drop the const from
			 * the sechdrs pointer */
			symhdr = (Elf_Shdr *)&sechdrs[i];
			strtab = (char *)sechdrs[strindex].sh_addr;
			break;
		}
	}

	pr_debug("module %s: strtab %p, symhdr %p\n",
	       me->name, strtab, symhdr);

	if(me->arch.got_count > MAX_GOTS) {
		printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
		       me->name, me->arch.got_count, MAX_GOTS);
		return -EINVAL;
	}

	kfree(me->arch.section);
	me->arch.section = NULL;

	/* no symbol table */
	if(symhdr == NULL)
		return 0;

	oldptr = (void *)symhdr->sh_addr;
	newptr = oldptr + 1;	/* we start counting at 1 */
	nsyms = symhdr->sh_size / sizeof(Elf_Sym);
	pr_debug("OLD num_symtab %lu\n", nsyms);

	for (i = 1; i < nsyms; i++) {
		oldptr++;	/* note, count starts at 1 so preincrement */
		if(strncmp(strtab + oldptr->st_name,
			      ".L", 2) == 0)
			continue;

		if(newptr != oldptr)
			*newptr++ = *oldptr;
		else
			newptr++;
	}
	nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
	pr_debug("NEW num_symtab %lu\n", nsyms);
	symhdr->sh_size = nsyms * sizeof(Elf_Sym);

	/* find .altinstructions section */
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		void *aseg = (void *) s->sh_addr;
		char *secname = secstrings + s->sh_name;

		if (!strcmp(".altinstructions", secname))
			/* patch .altinstructions */
			apply_alternatives(aseg, aseg + s->sh_size, me->name);

#ifdef CONFIG_DYNAMIC_FTRACE
		/* For 32 bit kernels we're compiling modules with
		 * -ffunction-sections so we must relocate the addresses in the
		 * ftrace callsite section.
		 */
		if (symindex != -1 && !strcmp(secname, FTRACE_CALLSITE_SECTION)) {
			int err;
			if (s->sh_type == SHT_REL)
				err = apply_relocate((Elf_Shdr *)sechdrs,
							strtab, symindex,
							s - sechdrs, me);
			else if (s->sh_type == SHT_RELA)
				err = apply_relocate_add((Elf_Shdr *)sechdrs,
							strtab, symindex,
							s - sechdrs, me);
			if (err)
				return err;
		}
#endif
	}
	return 0;
}
void module_arch_cleanup(struct module *mod)
{
	deregister_unwind_table(mod);
}
#ifdef CONFIG_64BIT
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
	unsigned long start_opd = (Elf64_Addr)mod->core_layout.base +
				   mod->arch.fdesc_offset;
	unsigned long end_opd = start_opd +
				mod->arch.fdesc_count * sizeof(Elf64_Fdesc);

	if (ptr < (void *)start_opd || ptr >= (void *)end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif